diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/_meta.json b/sdk/machinelearning/azure-mgmt-machinelearningservices/_meta.json
index f4f9290aab9b..de8e81d2b76f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/_meta.json
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/_meta.json
@@ -1,11 +1,11 @@
{
- "commit": "c7daa3d35baaaabece0dbc6f731eadbe426973b9",
+ "commit": "0a54a99bd027502c8689f6abf7e9678761f6a87f",
"repository_url": "https://github.com/Azure/azure-rest-api-specs",
- "autorest": "3.9.2",
+ "autorest": "3.9.7",
"use": [
- "@autorest/python@6.4.12",
- "@autorest/modelerfour@4.24.3"
+ "@autorest/python@6.7.1",
+ "@autorest/modelerfour@4.26.2"
],
- "autorest_command": "autorest specification/machinelearningservices/resource-manager/readme.md --generate-sample=True --include-x-ms-examples-original-file=True --python --python-sdks-folder=/home/vsts/work/1/azure-sdk-for-python/sdk --use=@autorest/python@6.4.12 --use=@autorest/modelerfour@4.24.3 --version=3.9.2 --version-tolerant=False",
+ "autorest_command": "autorest specification/machinelearningservices/resource-manager/readme.md --generate-sample=True --include-x-ms-examples-original-file=True --python --python-sdks-folder=/mnt/vss/_work/1/s/azure-sdk-for-python/sdk --use=@autorest/python@6.7.1 --use=@autorest/modelerfour@4.26.2 --version=3.9.7 --version-tolerant=False",
"readme": "specification/machinelearningservices/resource-manager/readme.md"
}
\ No newline at end of file
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_configuration.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_configuration.py
index a128695e54fd..8720abd11dd0 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_configuration.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_configuration.py
@@ -29,14 +29,14 @@ class MachineLearningServicesMgmtClientConfiguration(Configuration): # pylint:
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
- :keyword api_version: Api Version. Default value is "2023-04-01". Note that overriding this
- default value may result in unsupported behavior.
+ :keyword api_version: Api Version. Default value is "2023-08-01-preview". Note that overriding
+ this default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(self, credential: "TokenCredential", subscription_id: str, **kwargs: Any) -> None:
super(MachineLearningServicesMgmtClientConfiguration, self).__init__(**kwargs)
- api_version: str = kwargs.pop("api_version", "2023-04-01")
+ api_version: str = kwargs.pop("api_version", "2023-08-01-preview")
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
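
The default `api_version` in this configuration now resolves to `2023-08-01-preview`. Callers who want to stay on the previous stable surface can pin it at construction time; a minimal sketch, assuming `azure-identity` is installed and using a placeholder subscription ID:

```python
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

# Placeholder subscription ID for illustration only.
client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-0000-0000-0000-000000000000",
    api_version="2023-04-01",  # pin the prior stable default instead of 2023-08-01-preview
)
```

As the docstring notes, overriding the default may result in unsupported behavior, in particular for operation groups that only exist in the preview surface.
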
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_machine_learning_services_mgmt_client.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_machine_learning_services_mgmt_client.py
index 690ca6db66e4..abe89fa629d5 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_machine_learning_services_mgmt_client.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_machine_learning_services_mgmt_client.py
@@ -18,6 +18,7 @@
from .operations import (
BatchDeploymentsOperations,
BatchEndpointsOperations,
+ CapacityReservationGroupsOperations,
CodeContainersOperations,
CodeVersionsOperations,
ComponentContainersOperations,
@@ -28,7 +29,15 @@
DatastoresOperations,
EnvironmentContainersOperations,
EnvironmentVersionsOperations,
+ FeaturesOperations,
+ FeaturesetContainersOperations,
+ FeaturesetVersionsOperations,
+ FeaturestoreEntityContainersOperations,
+ FeaturestoreEntityVersionsOperations,
JobsOperations,
+ LabelingJobsOperations,
+ ManagedNetworkProvisionsOperations,
+ ManagedNetworkSettingsRuleOperations,
ModelContainersOperations,
ModelVersionsOperations,
OnlineDeploymentsOperations,
@@ -49,6 +58,7 @@
RegistryModelContainersOperations,
RegistryModelVersionsOperations,
SchedulesOperations,
+ ServerlessEndpointsOperations,
UsagesOperations,
VirtualMachineSizesOperations,
WorkspaceConnectionsOperations,
@@ -64,10 +74,6 @@
class MachineLearningServicesMgmtClient: # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes
"""These APIs allow end users to operate on Azure Machine Learning Workspace resources.
- :ivar operations: Operations operations
- :vartype operations: azure.mgmt.machinelearningservices.operations.Operations
- :ivar workspaces: WorkspacesOperations operations
- :vartype workspaces: azure.mgmt.machinelearningservices.operations.WorkspacesOperations
:ivar usages: UsagesOperations operations
:vartype usages: azure.mgmt.machinelearningservices.operations.UsagesOperations
:ivar virtual_machine_sizes: VirtualMachineSizesOperations operations
@@ -77,15 +83,9 @@ class MachineLearningServicesMgmtClient: # pylint: disable=client-accepts-api-v
:vartype quotas: azure.mgmt.machinelearningservices.operations.QuotasOperations
:ivar compute: ComputeOperations operations
:vartype compute: azure.mgmt.machinelearningservices.operations.ComputeOperations
- :ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
- :vartype private_endpoint_connections:
- azure.mgmt.machinelearningservices.operations.PrivateEndpointConnectionsOperations
- :ivar private_link_resources: PrivateLinkResourcesOperations operations
- :vartype private_link_resources:
- azure.mgmt.machinelearningservices.operations.PrivateLinkResourcesOperations
- :ivar workspace_connections: WorkspaceConnectionsOperations operations
- :vartype workspace_connections:
- azure.mgmt.machinelearningservices.operations.WorkspaceConnectionsOperations
+ :ivar capacity_reservation_groups: CapacityReservationGroupsOperations operations
+ :vartype capacity_reservation_groups:
+ azure.mgmt.machinelearningservices.operations.CapacityReservationGroupsOperations
:ivar registry_code_containers: RegistryCodeContainersOperations operations
:vartype registry_code_containers:
azure.mgmt.machinelearningservices.operations.RegistryCodeContainersOperations
@@ -146,8 +146,24 @@ class MachineLearningServicesMgmtClient: # pylint: disable=client-accepts-api-v
:ivar environment_versions: EnvironmentVersionsOperations operations
:vartype environment_versions:
azure.mgmt.machinelearningservices.operations.EnvironmentVersionsOperations
+ :ivar featureset_containers: FeaturesetContainersOperations operations
+ :vartype featureset_containers:
+ azure.mgmt.machinelearningservices.operations.FeaturesetContainersOperations
+ :ivar features: FeaturesOperations operations
+ :vartype features: azure.mgmt.machinelearningservices.operations.FeaturesOperations
+ :ivar featureset_versions: FeaturesetVersionsOperations operations
+ :vartype featureset_versions:
+ azure.mgmt.machinelearningservices.operations.FeaturesetVersionsOperations
+ :ivar featurestore_entity_containers: FeaturestoreEntityContainersOperations operations
+ :vartype featurestore_entity_containers:
+ azure.mgmt.machinelearningservices.operations.FeaturestoreEntityContainersOperations
+ :ivar featurestore_entity_versions: FeaturestoreEntityVersionsOperations operations
+ :vartype featurestore_entity_versions:
+ azure.mgmt.machinelearningservices.operations.FeaturestoreEntityVersionsOperations
:ivar jobs: JobsOperations operations
:vartype jobs: azure.mgmt.machinelearningservices.operations.JobsOperations
+ :ivar labeling_jobs: LabelingJobsOperations operations
+ :vartype labeling_jobs: azure.mgmt.machinelearningservices.operations.LabelingJobsOperations
:ivar model_containers: ModelContainersOperations operations
:vartype model_containers:
azure.mgmt.machinelearningservices.operations.ModelContainersOperations
@@ -161,19 +177,41 @@ class MachineLearningServicesMgmtClient: # pylint: disable=client-accepts-api-v
azure.mgmt.machinelearningservices.operations.OnlineDeploymentsOperations
:ivar schedules: SchedulesOperations operations
:vartype schedules: azure.mgmt.machinelearningservices.operations.SchedulesOperations
+ :ivar serverless_endpoints: ServerlessEndpointsOperations operations
+ :vartype serverless_endpoints:
+ azure.mgmt.machinelearningservices.operations.ServerlessEndpointsOperations
:ivar registries: RegistriesOperations operations
:vartype registries: azure.mgmt.machinelearningservices.operations.RegistriesOperations
:ivar workspace_features: WorkspaceFeaturesOperations operations
:vartype workspace_features:
azure.mgmt.machinelearningservices.operations.WorkspaceFeaturesOperations
+ :ivar operations: Operations operations
+ :vartype operations: azure.mgmt.machinelearningservices.operations.Operations
+ :ivar workspaces: WorkspacesOperations operations
+ :vartype workspaces: azure.mgmt.machinelearningservices.operations.WorkspacesOperations
+ :ivar workspace_connections: WorkspaceConnectionsOperations operations
+ :vartype workspace_connections:
+ azure.mgmt.machinelearningservices.operations.WorkspaceConnectionsOperations
+ :ivar managed_network_settings_rule: ManagedNetworkSettingsRuleOperations operations
+ :vartype managed_network_settings_rule:
+ azure.mgmt.machinelearningservices.operations.ManagedNetworkSettingsRuleOperations
+ :ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
+ :vartype private_endpoint_connections:
+ azure.mgmt.machinelearningservices.operations.PrivateEndpointConnectionsOperations
+ :ivar private_link_resources: PrivateLinkResourcesOperations operations
+ :vartype private_link_resources:
+ azure.mgmt.machinelearningservices.operations.PrivateLinkResourcesOperations
+ :ivar managed_network_provisions: ManagedNetworkProvisionsOperations operations
+ :vartype managed_network_provisions:
+ azure.mgmt.machinelearningservices.operations.ManagedNetworkProvisionsOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
- :keyword api_version: Api Version. Default value is "2023-04-01". Note that overriding this
- default value may result in unsupported behavior.
+ :keyword api_version: Api Version. Default value is "2023-08-01-preview". Note that overriding
+ this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
@@ -195,21 +233,13 @@ def __init__(
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
- self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
- self.workspaces = WorkspacesOperations(self._client, self._config, self._serialize, self._deserialize)
self.usages = UsagesOperations(self._client, self._config, self._serialize, self._deserialize)
self.virtual_machine_sizes = VirtualMachineSizesOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.quotas = QuotasOperations(self._client, self._config, self._serialize, self._deserialize)
self.compute = ComputeOperations(self._client, self._config, self._serialize, self._deserialize)
- self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
- self._client, self._config, self._serialize, self._deserialize
- )
- self.private_link_resources = PrivateLinkResourcesOperations(
- self._client, self._config, self._serialize, self._deserialize
- )
- self.workspace_connections = WorkspaceConnectionsOperations(
+ self.capacity_reservation_groups = CapacityReservationGroupsOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.registry_code_containers = RegistryCodeContainersOperations(
@@ -263,7 +293,21 @@ def __init__(
self.environment_versions = EnvironmentVersionsOperations(
self._client, self._config, self._serialize, self._deserialize
)
+ self.featureset_containers = FeaturesetContainersOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.features = FeaturesOperations(self._client, self._config, self._serialize, self._deserialize)
+ self.featureset_versions = FeaturesetVersionsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.featurestore_entity_containers = FeaturestoreEntityContainersOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.featurestore_entity_versions = FeaturestoreEntityVersionsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
self.jobs = JobsOperations(self._client, self._config, self._serialize, self._deserialize)
+ self.labeling_jobs = LabelingJobsOperations(self._client, self._config, self._serialize, self._deserialize)
self.model_containers = ModelContainersOperations(
self._client, self._config, self._serialize, self._deserialize
)
@@ -275,10 +319,30 @@ def __init__(
self._client, self._config, self._serialize, self._deserialize
)
self.schedules = SchedulesOperations(self._client, self._config, self._serialize, self._deserialize)
+ self.serverless_endpoints = ServerlessEndpointsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
self.registries = RegistriesOperations(self._client, self._config, self._serialize, self._deserialize)
self.workspace_features = WorkspaceFeaturesOperations(
self._client, self._config, self._serialize, self._deserialize
)
+ self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
+ self.workspaces = WorkspacesOperations(self._client, self._config, self._serialize, self._deserialize)
+ self.workspace_connections = WorkspaceConnectionsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.managed_network_settings_rule = ManagedNetworkSettingsRuleOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.private_link_resources = PrivateLinkResourcesOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.managed_network_provisions = ManagedNetworkProvisionsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
"""Runs the network request through the client's chained policies.
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_serialization.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_serialization.py
index 842ae727fbbc..4bae2292227b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_serialization.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_serialization.py
@@ -662,8 +662,9 @@ def _serialize(self, target_obj, data_type=None, **kwargs):
_serialized.update(_new_attr) # type: ignore
_new_attr = _new_attr[k] # type: ignore
_serialized = _serialized[k]
- except ValueError:
- continue
+ except ValueError as err:
+ if isinstance(err, SerializationError):
+ raise
except (AttributeError, KeyError, TypeError) as err:
msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj))
@@ -741,6 +742,8 @@ def query(self, name, data, data_type, **kwargs):
:param data: The data to be serialized.
:param str data_type: The type to be serialized from.
+ :keyword bool skip_quote: Whether to skip quoting the serialized result.
+ Defaults to False.
:rtype: str
:raises: TypeError if serialization fails.
:raises: ValueError if data is None
@@ -749,10 +752,8 @@ def query(self, name, data, data_type, **kwargs):
# Treat the list aside, since we don't want to encode the div separator
if data_type.startswith("["):
internal_data_type = data_type[1:-1]
- data = [self.serialize_data(d, internal_data_type, **kwargs) if d is not None else "" for d in data]
- if not kwargs.get("skip_quote", False):
- data = [quote(str(d), safe="") for d in data]
- return str(self.serialize_iter(data, internal_data_type, **kwargs))
+ do_quote = not kwargs.get("skip_quote", False)
+ return str(self.serialize_iter(data, internal_data_type, do_quote=do_quote, **kwargs))
# Not a list, regular serialization
output = self.serialize_data(data, data_type, **kwargs)
@@ -891,6 +892,8 @@ def serialize_iter(self, data, iter_type, div=None, **kwargs):
not be None or empty.
:param str div: If set, this str will be used to combine the elements
in the iterable into a combined string. Default is 'None'.
+ :keyword bool do_quote: Whether to quote the serialized result of each iterable element.
+ Defaults to False.
:rtype: list, str
"""
if isinstance(data, str):
@@ -903,9 +906,14 @@ def serialize_iter(self, data, iter_type, div=None, **kwargs):
for d in data:
try:
serialized.append(self.serialize_data(d, iter_type, **kwargs))
- except ValueError:
+ except ValueError as err:
+ if isinstance(err, SerializationError):
+ raise
serialized.append(None)
+ if kwargs.get("do_quote", False):
+ serialized = ["" if s is None else quote(str(s), safe="") for s in serialized]
+
if div:
serialized = ["" if s is None else str(s) for s in serialized]
serialized = div.join(serialized)
@@ -950,7 +958,9 @@ def serialize_dict(self, attr, dict_type, **kwargs):
for key, value in attr.items():
try:
serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs)
- except ValueError:
+ except ValueError as err:
+ if isinstance(err, SerializationError):
+ raise
serialized[self.serialize_unicode(key)] = None
if "xml" in serialization_ctxt:
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_vendor.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_vendor.py
index bd0df84f5319..0dafe0e287ff 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_vendor.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_vendor.py
@@ -5,8 +5,6 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
-from typing import List, cast
-
from azure.core.pipeline.transport import HttpRequest
@@ -16,15 +14,3 @@ def _convert_request(request, files=None):
if files:
request.set_formdata_body(files)
return request
-
-
-def _format_url_section(template, **kwargs):
- components = template.split("/")
- while components:
- try:
- return template.format(**kwargs)
- except KeyError as key:
- # Need the cast, as for some reasons "split" is typed as list[str | Any]
- formatted_components = cast(List[str], template.split("/"))
- components = [c for c in formatted_components if "{}".format(key.args[0]) not in c]
- template = "/".join(components)
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_version.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_version.py
index 2eda20789583..e5754a47ce68 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_version.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_version.py
@@ -6,4 +6,4 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
-VERSION = "2.0.0b2"
+VERSION = "1.0.0b1"
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/_configuration.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/_configuration.py
index f012a758393b..916f34cd0131 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/_configuration.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/_configuration.py
@@ -29,14 +29,14 @@ class MachineLearningServicesMgmtClientConfiguration(Configuration): # pylint:
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
- :keyword api_version: Api Version. Default value is "2023-04-01". Note that overriding this
- default value may result in unsupported behavior.
+ :keyword api_version: Api Version. Default value is "2023-08-01-preview". Note that overriding
+ this default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(self, credential: "AsyncTokenCredential", subscription_id: str, **kwargs: Any) -> None:
super(MachineLearningServicesMgmtClientConfiguration, self).__init__(**kwargs)
- api_version: str = kwargs.pop("api_version", "2023-04-01")
+ api_version: str = kwargs.pop("api_version", "2023-08-01-preview")
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/_machine_learning_services_mgmt_client.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/_machine_learning_services_mgmt_client.py
index 0240f80e8769..8dbe179e7b8c 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/_machine_learning_services_mgmt_client.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/_machine_learning_services_mgmt_client.py
@@ -18,6 +18,7 @@
from .operations import (
BatchDeploymentsOperations,
BatchEndpointsOperations,
+ CapacityReservationGroupsOperations,
CodeContainersOperations,
CodeVersionsOperations,
ComponentContainersOperations,
@@ -28,7 +29,15 @@
DatastoresOperations,
EnvironmentContainersOperations,
EnvironmentVersionsOperations,
+ FeaturesOperations,
+ FeaturesetContainersOperations,
+ FeaturesetVersionsOperations,
+ FeaturestoreEntityContainersOperations,
+ FeaturestoreEntityVersionsOperations,
JobsOperations,
+ LabelingJobsOperations,
+ ManagedNetworkProvisionsOperations,
+ ManagedNetworkSettingsRuleOperations,
ModelContainersOperations,
ModelVersionsOperations,
OnlineDeploymentsOperations,
@@ -49,6 +58,7 @@
RegistryModelContainersOperations,
RegistryModelVersionsOperations,
SchedulesOperations,
+ ServerlessEndpointsOperations,
UsagesOperations,
VirtualMachineSizesOperations,
WorkspaceConnectionsOperations,
@@ -64,10 +74,6 @@
class MachineLearningServicesMgmtClient: # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes
"""These APIs allow end users to operate on Azure Machine Learning Workspace resources.
- :ivar operations: Operations operations
- :vartype operations: azure.mgmt.machinelearningservices.aio.operations.Operations
- :ivar workspaces: WorkspacesOperations operations
- :vartype workspaces: azure.mgmt.machinelearningservices.aio.operations.WorkspacesOperations
:ivar usages: UsagesOperations operations
:vartype usages: azure.mgmt.machinelearningservices.aio.operations.UsagesOperations
:ivar virtual_machine_sizes: VirtualMachineSizesOperations operations
@@ -77,15 +83,9 @@ class MachineLearningServicesMgmtClient: # pylint: disable=client-accepts-api-v
:vartype quotas: azure.mgmt.machinelearningservices.aio.operations.QuotasOperations
:ivar compute: ComputeOperations operations
:vartype compute: azure.mgmt.machinelearningservices.aio.operations.ComputeOperations
- :ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
- :vartype private_endpoint_connections:
- azure.mgmt.machinelearningservices.aio.operations.PrivateEndpointConnectionsOperations
- :ivar private_link_resources: PrivateLinkResourcesOperations operations
- :vartype private_link_resources:
- azure.mgmt.machinelearningservices.aio.operations.PrivateLinkResourcesOperations
- :ivar workspace_connections: WorkspaceConnectionsOperations operations
- :vartype workspace_connections:
- azure.mgmt.machinelearningservices.aio.operations.WorkspaceConnectionsOperations
+ :ivar capacity_reservation_groups: CapacityReservationGroupsOperations operations
+ :vartype capacity_reservation_groups:
+ azure.mgmt.machinelearningservices.aio.operations.CapacityReservationGroupsOperations
:ivar registry_code_containers: RegistryCodeContainersOperations operations
:vartype registry_code_containers:
azure.mgmt.machinelearningservices.aio.operations.RegistryCodeContainersOperations
@@ -148,8 +148,25 @@ class MachineLearningServicesMgmtClient: # pylint: disable=client-accepts-api-v
:ivar environment_versions: EnvironmentVersionsOperations operations
:vartype environment_versions:
azure.mgmt.machinelearningservices.aio.operations.EnvironmentVersionsOperations
+ :ivar featureset_containers: FeaturesetContainersOperations operations
+ :vartype featureset_containers:
+ azure.mgmt.machinelearningservices.aio.operations.FeaturesetContainersOperations
+ :ivar features: FeaturesOperations operations
+ :vartype features: azure.mgmt.machinelearningservices.aio.operations.FeaturesOperations
+ :ivar featureset_versions: FeaturesetVersionsOperations operations
+ :vartype featureset_versions:
+ azure.mgmt.machinelearningservices.aio.operations.FeaturesetVersionsOperations
+ :ivar featurestore_entity_containers: FeaturestoreEntityContainersOperations operations
+ :vartype featurestore_entity_containers:
+ azure.mgmt.machinelearningservices.aio.operations.FeaturestoreEntityContainersOperations
+ :ivar featurestore_entity_versions: FeaturestoreEntityVersionsOperations operations
+ :vartype featurestore_entity_versions:
+ azure.mgmt.machinelearningservices.aio.operations.FeaturestoreEntityVersionsOperations
:ivar jobs: JobsOperations operations
:vartype jobs: azure.mgmt.machinelearningservices.aio.operations.JobsOperations
+ :ivar labeling_jobs: LabelingJobsOperations operations
+ :vartype labeling_jobs:
+ azure.mgmt.machinelearningservices.aio.operations.LabelingJobsOperations
:ivar model_containers: ModelContainersOperations operations
:vartype model_containers:
azure.mgmt.machinelearningservices.aio.operations.ModelContainersOperations
@@ -164,19 +181,41 @@ class MachineLearningServicesMgmtClient: # pylint: disable=client-accepts-api-v
azure.mgmt.machinelearningservices.aio.operations.OnlineDeploymentsOperations
:ivar schedules: SchedulesOperations operations
:vartype schedules: azure.mgmt.machinelearningservices.aio.operations.SchedulesOperations
+ :ivar serverless_endpoints: ServerlessEndpointsOperations operations
+ :vartype serverless_endpoints:
+ azure.mgmt.machinelearningservices.aio.operations.ServerlessEndpointsOperations
:ivar registries: RegistriesOperations operations
:vartype registries: azure.mgmt.machinelearningservices.aio.operations.RegistriesOperations
:ivar workspace_features: WorkspaceFeaturesOperations operations
:vartype workspace_features:
azure.mgmt.machinelearningservices.aio.operations.WorkspaceFeaturesOperations
+ :ivar operations: Operations operations
+ :vartype operations: azure.mgmt.machinelearningservices.aio.operations.Operations
+ :ivar workspaces: WorkspacesOperations operations
+ :vartype workspaces: azure.mgmt.machinelearningservices.aio.operations.WorkspacesOperations
+ :ivar workspace_connections: WorkspaceConnectionsOperations operations
+ :vartype workspace_connections:
+ azure.mgmt.machinelearningservices.aio.operations.WorkspaceConnectionsOperations
+ :ivar managed_network_settings_rule: ManagedNetworkSettingsRuleOperations operations
+ :vartype managed_network_settings_rule:
+ azure.mgmt.machinelearningservices.aio.operations.ManagedNetworkSettingsRuleOperations
+ :ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
+ :vartype private_endpoint_connections:
+ azure.mgmt.machinelearningservices.aio.operations.PrivateEndpointConnectionsOperations
+ :ivar private_link_resources: PrivateLinkResourcesOperations operations
+ :vartype private_link_resources:
+ azure.mgmt.machinelearningservices.aio.operations.PrivateLinkResourcesOperations
+ :ivar managed_network_provisions: ManagedNetworkProvisionsOperations operations
+ :vartype managed_network_provisions:
+ azure.mgmt.machinelearningservices.aio.operations.ManagedNetworkProvisionsOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
- :keyword api_version: Api Version. Default value is "2023-04-01". Note that overriding this
- default value may result in unsupported behavior.
+ :keyword api_version: Api Version. Default value is "2023-08-01-preview". Note that overriding
+ this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
@@ -198,21 +237,13 @@ def __init__(
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
- self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
- self.workspaces = WorkspacesOperations(self._client, self._config, self._serialize, self._deserialize)
self.usages = UsagesOperations(self._client, self._config, self._serialize, self._deserialize)
self.virtual_machine_sizes = VirtualMachineSizesOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.quotas = QuotasOperations(self._client, self._config, self._serialize, self._deserialize)
self.compute = ComputeOperations(self._client, self._config, self._serialize, self._deserialize)
- self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
- self._client, self._config, self._serialize, self._deserialize
- )
- self.private_link_resources = PrivateLinkResourcesOperations(
- self._client, self._config, self._serialize, self._deserialize
- )
- self.workspace_connections = WorkspaceConnectionsOperations(
+ self.capacity_reservation_groups = CapacityReservationGroupsOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.registry_code_containers = RegistryCodeContainersOperations(
@@ -266,7 +297,21 @@ def __init__(
self.environment_versions = EnvironmentVersionsOperations(
self._client, self._config, self._serialize, self._deserialize
)
+ self.featureset_containers = FeaturesetContainersOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.features = FeaturesOperations(self._client, self._config, self._serialize, self._deserialize)
+ self.featureset_versions = FeaturesetVersionsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.featurestore_entity_containers = FeaturestoreEntityContainersOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.featurestore_entity_versions = FeaturestoreEntityVersionsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
self.jobs = JobsOperations(self._client, self._config, self._serialize, self._deserialize)
+ self.labeling_jobs = LabelingJobsOperations(self._client, self._config, self._serialize, self._deserialize)
self.model_containers = ModelContainersOperations(
self._client, self._config, self._serialize, self._deserialize
)
@@ -278,10 +323,30 @@ def __init__(
self._client, self._config, self._serialize, self._deserialize
)
self.schedules = SchedulesOperations(self._client, self._config, self._serialize, self._deserialize)
+ self.serverless_endpoints = ServerlessEndpointsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
self.registries = RegistriesOperations(self._client, self._config, self._serialize, self._deserialize)
self.workspace_features = WorkspaceFeaturesOperations(
self._client, self._config, self._serialize, self._deserialize
)
+ self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
+ self.workspaces = WorkspacesOperations(self._client, self._config, self._serialize, self._deserialize)
+ self.workspace_connections = WorkspaceConnectionsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.managed_network_settings_rule = ManagedNetworkSettingsRuleOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.private_link_resources = PrivateLinkResourcesOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.managed_network_provisions = ManagedNetworkProvisionsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/__init__.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/__init__.py
index 4967e3af6930..1421ad90d913 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/__init__.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/__init__.py
@@ -6,15 +6,11 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
-from ._operations import Operations
-from ._workspaces_operations import WorkspacesOperations
from ._usages_operations import UsagesOperations
from ._virtual_machine_sizes_operations import VirtualMachineSizesOperations
from ._quotas_operations import QuotasOperations
from ._compute_operations import ComputeOperations
-from ._private_endpoint_connections_operations import PrivateEndpointConnectionsOperations
-from ._private_link_resources_operations import PrivateLinkResourcesOperations
-from ._workspace_connections_operations import WorkspaceConnectionsOperations
+from ._capacity_reservation_groups_operations import CapacityReservationGroupsOperations
from ._registry_code_containers_operations import RegistryCodeContainersOperations
from ._registry_code_versions_operations import RegistryCodeVersionsOperations
from ._registry_component_containers_operations import RegistryComponentContainersOperations
@@ -36,29 +32,39 @@
from ._datastores_operations import DatastoresOperations
from ._environment_containers_operations import EnvironmentContainersOperations
from ._environment_versions_operations import EnvironmentVersionsOperations
+from ._featureset_containers_operations import FeaturesetContainersOperations
+from ._features_operations import FeaturesOperations
+from ._featureset_versions_operations import FeaturesetVersionsOperations
+from ._featurestore_entity_containers_operations import FeaturestoreEntityContainersOperations
+from ._featurestore_entity_versions_operations import FeaturestoreEntityVersionsOperations
from ._jobs_operations import JobsOperations
+from ._labeling_jobs_operations import LabelingJobsOperations
from ._model_containers_operations import ModelContainersOperations
from ._model_versions_operations import ModelVersionsOperations
from ._online_endpoints_operations import OnlineEndpointsOperations
from ._online_deployments_operations import OnlineDeploymentsOperations
from ._schedules_operations import SchedulesOperations
+from ._serverless_endpoints_operations import ServerlessEndpointsOperations
from ._registries_operations import RegistriesOperations
from ._workspace_features_operations import WorkspaceFeaturesOperations
+from ._operations import Operations
+from ._workspaces_operations import WorkspacesOperations
+from ._workspace_connections_operations import WorkspaceConnectionsOperations
+from ._managed_network_settings_rule_operations import ManagedNetworkSettingsRuleOperations
+from ._private_endpoint_connections_operations import PrivateEndpointConnectionsOperations
+from ._private_link_resources_operations import PrivateLinkResourcesOperations
+from ._managed_network_provisions_operations import ManagedNetworkProvisionsOperations
from ._patch import __all__ as _patch_all
from ._patch import * # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
- "Operations",
- "WorkspacesOperations",
"UsagesOperations",
"VirtualMachineSizesOperations",
"QuotasOperations",
"ComputeOperations",
- "PrivateEndpointConnectionsOperations",
- "PrivateLinkResourcesOperations",
- "WorkspaceConnectionsOperations",
+ "CapacityReservationGroupsOperations",
"RegistryCodeContainersOperations",
"RegistryCodeVersionsOperations",
"RegistryComponentContainersOperations",
@@ -80,14 +86,28 @@
"DatastoresOperations",
"EnvironmentContainersOperations",
"EnvironmentVersionsOperations",
+ "FeaturesetContainersOperations",
+ "FeaturesOperations",
+ "FeaturesetVersionsOperations",
+ "FeaturestoreEntityContainersOperations",
+ "FeaturestoreEntityVersionsOperations",
"JobsOperations",
+ "LabelingJobsOperations",
"ModelContainersOperations",
"ModelVersionsOperations",
"OnlineEndpointsOperations",
"OnlineDeploymentsOperations",
"SchedulesOperations",
+ "ServerlessEndpointsOperations",
"RegistriesOperations",
"WorkspaceFeaturesOperations",
+ "Operations",
+ "WorkspacesOperations",
+ "WorkspaceConnectionsOperations",
+ "ManagedNetworkSettingsRuleOperations",
+ "PrivateEndpointConnectionsOperations",
+ "PrivateLinkResourcesOperations",
+ "ManagedNetworkProvisionsOperations",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
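
With the reworked `__all__` above, the new operation classes are importable from `azure.mgmt.machinelearningservices.aio.operations`, though (per the generated class docstrings) they are meant to be reached through the client attributes rather than instantiated directly. For type annotations only, something like:

```python
from azure.mgmt.machinelearningservices.aio.operations import (
    CapacityReservationGroupsOperations,
    ManagedNetworkProvisionsOperations,
    ServerlessEndpointsOperations,
)


def describe(ops: CapacityReservationGroupsOperations) -> None:
    # Operation groups are normally obtained from the client,
    # e.g. client.capacity_reservation_groups, not constructed by hand.
    print(type(ops).__name__)
```
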
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_capacity_reservation_groups_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_capacity_reservation_groups_operations.py
new file mode 100644
index 000000000000..cec0efaa9f06
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_capacity_reservation_groups_operations.py
@@ -0,0 +1,658 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
+import urllib.parse
+
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import AsyncHttpResponse
+from azure.core.rest import HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from ... import models as _models
+from ..._vendor import _convert_request
+from ...operations._capacity_reservation_groups_operations import (
+ build_capacity_reservation_groups_list_by_subscription_request,
+ build_create_or_update_request,
+ build_delete_request,
+ build_get_request,
+ build_list_request,
+ build_update_request,
+)
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class CapacityReservationGroupsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.aio.MachineLearningServicesMgmtClient`'s
+ :attr:`capacity_reservation_groups` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def capacity_reservation_groups_list_by_subscription(
+ self, skip: Optional[str] = None, **kwargs: Any
+ ) -> AsyncIterable["_models.CapacityReservationGroup"]:
+ """capacity_reservation_groups_list_by_subscription.
+
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either CapacityReservationGroup or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.CapacityReservationGroup]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.CapacityReservationGroupTrackedResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ request = build_capacity_reservation_groups_list_by_subscription_request(
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ api_version=api_version,
+ template_url=self.capacity_reservation_groups_list_by_subscription.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize(
+ "CapacityReservationGroupTrackedResourceArmPaginatedResult", pipeline_response
+ )
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
+
+ capacity_reservation_groups_list_by_subscription.metadata = {
+ "url": "/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/capacityReserverationGroups"
+ }
+
+ @distributed_trace
+ def list(
+ self, resource_group_name: str, skip: Optional[str] = None, **kwargs: Any
+ ) -> AsyncIterable["_models.CapacityReservationGroup"]:
+ """list.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either CapacityReservationGroup or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.CapacityReservationGroup]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.CapacityReservationGroupTrackedResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ request = build_list_request(
+ resource_group_name=resource_group_name,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ api_version=api_version,
+ template_url=self.list.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize(
+ "CapacityReservationGroupTrackedResourceArmPaginatedResult", pipeline_response
+ )
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
+
+ list.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/capacityReserverationGroups"
+ }
+
+ @distributed_trace_async
+ async def delete( # pylint: disable=inconsistent-return-statements
+ self, resource_group_name: str, group_id: str, **kwargs: Any
+ ) -> None:
+ """delete.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param group_id: Required.
+ :type group_id: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ request = build_delete_request(
+ resource_group_name=resource_group_name,
+ group_id=group_id,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.delete.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202, 204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ delete.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/capacityReserverationGroups/{groupId}"
+ }
+
+ @distributed_trace_async
+ async def get(self, resource_group_name: str, group_id: str, **kwargs: Any) -> _models.CapacityReservationGroup:
+ """get.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param group_id: Required.
+ :type group_id: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: CapacityReservationGroup or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.CapacityReservationGroup
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.CapacityReservationGroup] = kwargs.pop("cls", None)
+
+ request = build_get_request(
+ resource_group_name=resource_group_name,
+ group_id=group_id,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.get.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("CapacityReservationGroup", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ get.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/capacityReserverationGroups/{groupId}"
+ }
+
+ @overload
+ async def update( # pylint: disable=inconsistent-return-statements
+ self,
+ resource_group_name: str,
+ group_id: str,
+ body: _models.PartialMinimalTrackedResourceWithSkuAndIdentity,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> None:
+ """update.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param group_id: Required.
+ :type group_id: str
+ :param body: Required.
+ :type body:
+ ~azure.mgmt.machinelearningservices.models.PartialMinimalTrackedResourceWithSkuAndIdentity
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def update( # pylint: disable=inconsistent-return-statements
+ self,
+ resource_group_name: str,
+ group_id: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> None:
+ """update.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param group_id: Required.
+ :type group_id: str
+ :param body: Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def update( # pylint: disable=inconsistent-return-statements
+ self,
+ resource_group_name: str,
+ group_id: str,
+ body: Union[_models.PartialMinimalTrackedResourceWithSkuAndIdentity, IO],
+ **kwargs: Any
+ ) -> None:
+ """update.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param group_id: Required.
+ :type group_id: str
+ :param body: Is either a PartialMinimalTrackedResourceWithSkuAndIdentity type or an IO type.
+ Required.
+ :type body:
+ ~azure.mgmt.machinelearningservices.models.PartialMinimalTrackedResourceWithSkuAndIdentity or
+ IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "PartialMinimalTrackedResourceWithSkuAndIdentity")
+
+ request = build_update_request(
+ resource_group_name=resource_group_name,
+ group_id=group_id,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self.update.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ update.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/capacityReserverationGroups/{groupId}"
+ }
+
+ @overload
+ async def create_or_update(
+ self,
+ resource_group_name: str,
+ group_id: str,
+ body: _models.CapacityReservationGroup,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.CapacityReservationGroup:
+ """create_or_update.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param group_id: Required.
+ :type group_id: str
+ :param body: Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.CapacityReservationGroup
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: CapacityReservationGroup or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.CapacityReservationGroup
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_or_update(
+ self,
+ resource_group_name: str,
+ group_id: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.CapacityReservationGroup:
+ """create_or_update.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param group_id: Required.
+ :type group_id: str
+ :param body: Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: CapacityReservationGroup or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.CapacityReservationGroup
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def create_or_update(
+ self, resource_group_name: str, group_id: str, body: Union[_models.CapacityReservationGroup, IO], **kwargs: Any
+ ) -> _models.CapacityReservationGroup:
+ """create_or_update.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param group_id: Required.
+ :type group_id: str
+ :param body: Is either a CapacityReservationGroup type or an IO type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.CapacityReservationGroup or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: CapacityReservationGroup or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.CapacityReservationGroup
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.CapacityReservationGroup] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "CapacityReservationGroup")
+
+ request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ group_id=group_id,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self.create_or_update.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ if response.status_code == 200:
+ deserialized = self._deserialize("CapacityReservationGroup", pipeline_response)
+
+ if response.status_code == 201:
+ deserialized = self._deserialize("CapacityReservationGroup", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ create_or_update.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/capacityReserverationGroups/{groupId}"
+ }
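
The capacity reservation group operations above follow the generated model-or-IO pattern: a typed model body is serialized to JSON, while a bytes or file-like body is passed through unchanged under the supplied content type. The sketch below is a minimal, hypothetical usage example; the `capacity_reservation_groups` attribute name, the JSON field names in the payload, and all resource identifiers are assumptions that do not appear in this diff.

import asyncio
import json

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def main() -> None:
    # Placeholder identifiers; replace with real values.
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # Raw JSON bytes exercise the IO overload; the field name below follows the
            # usual ARM tracked-resource shape and is an assumption, not taken from this diff.
            payload = json.dumps({"location": "eastus"}).encode("utf-8")
            group = await client.capacity_reservation_groups.create_or_update(
                resource_group_name="<resource-group>",
                group_id="<group-id>",
                body=payload,
            )
            print(group)


asyncio.run(main())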
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_component_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_component_versions_operations.py
index 5910c2b49220..e7549f81e16e 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_component_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_component_versions_operations.py
@@ -69,6 +69,7 @@ def list(
top: Optional[int] = None,
skip: Optional[str] = None,
list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ stage: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.ComponentVersion"]:
"""List component versions.
@@ -91,6 +92,8 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+ :param stage: Component stage. Default value is None.
+ :type stage: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ComponentVersion or the result of cls(response)
:rtype:
@@ -123,6 +126,7 @@ def prepare_request(next_link=None):
top=top,
skip=skip,
list_view_type=list_view_type,
+ stage=stage,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_compute_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_compute_operations.py
index fff3c389b06b..775cb8fb2b21 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_compute_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_compute_operations.py
@@ -7,7 +7,7 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+from typing import Any, AsyncIterable, Callable, Dict, IO, List, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -34,13 +34,17 @@
from ...operations._compute_operations import (
build_create_or_update_request,
build_delete_request,
+ build_get_allowed_resize_sizes_request,
build_get_request,
build_list_keys_request,
build_list_nodes_request,
build_list_request,
+ build_resize_request,
build_restart_request,
build_start_request,
build_stop_request,
+ build_update_custom_services_request,
+ build_update_idle_shutdown_setting_request,
build_update_request,
)
@@ -48,7 +52,7 @@
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
-class ComputeOperations:
+class ComputeOperations: # pylint: disable=too-many-public-methods
"""
.. warning::
**DO NOT** instantiate this class directly.
@@ -866,6 +870,155 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}"
}
+ @overload
+ async def update_custom_services( # pylint: disable=inconsistent-return-statements
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ compute_name: str,
+ custom_services: List[_models.CustomService],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> None:
+ """Updates the custom services list. The list of custom services provided shall be overwritten.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param compute_name: Name of the Azure Machine Learning compute. Required.
+ :type compute_name: str
+ :param custom_services: New list of Custom Services. Required.
+ :type custom_services: list[~azure.mgmt.machinelearningservices.models.CustomService]
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def update_custom_services( # pylint: disable=inconsistent-return-statements
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ compute_name: str,
+ custom_services: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> None:
+ """Updates the custom services list. The list of custom services provided shall be overwritten.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param compute_name: Name of the Azure Machine Learning compute. Required.
+ :type compute_name: str
+ :param custom_services: New list of Custom Services. Required.
+ :type custom_services: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def update_custom_services( # pylint: disable=inconsistent-return-statements
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ compute_name: str,
+ custom_services: Union[List[_models.CustomService], IO],
+ **kwargs: Any
+ ) -> None:
+ """Updates the custom services list. The list of custom services provided shall be overwritten.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param compute_name: Name of the Azure Machine Learning compute. Required.
+ :type compute_name: str
+ :param custom_services: New list of Custom Services. Is either a list of CustomService objects
+ or an IO type. Required.
+ :type custom_services: list[~azure.mgmt.machinelearningservices.models.CustomService] or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(custom_services, (IOBase, bytes)):
+ _content = custom_services
+ else:
+ _json = self._serialize.body(custom_services, "[CustomService]")
+
+ request = build_update_custom_services_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ compute_name=compute_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self.update_custom_services.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ update_custom_services.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/customServices"
+ }
+
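
update_custom_services replaces the whole custom services list in one call, again accepting either a list of CustomService models or a raw IO/bytes payload. The sketch below is hypothetical, reusing a client constructed as in the earlier capacity reservation group example; the compute attribute name, the JSON keys in the payload, and the compute instance name are assumptions not shown in this diff.

import json

from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def replace_custom_services(client: MachineLearningServicesMgmtClient) -> None:
    # Raw JSON bytes exercise the IO overload; the "name" key is an assumption,
    # since the CustomService model fields are not part of this diff.
    payload = json.dumps([{"name": "rstudio"}]).encode("utf-8")
    await client.compute.update_custom_services(
        resource_group_name="<resource-group>",
        workspace_name="<workspace>",
        compute_name="<compute-instance>",
        custom_services=payload,
    )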
@distributed_trace
def list_nodes(
self, resource_group_name: str, workspace_name: str, compute_name: str, **kwargs: Any
@@ -1384,3 +1537,452 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
begin_restart.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/restart"
}
+
+ @overload
+ async def update_idle_shutdown_setting( # pylint: disable=inconsistent-return-statements
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ compute_name: str,
+ parameters: _models.IdleShutdownSetting,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> None:
+ """Updates the idle shutdown setting of a compute instance.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param compute_name: Name of the Azure Machine Learning compute. Required.
+ :type compute_name: str
+ :param parameters: The object for updating idle shutdown setting of specified ComputeInstance.
+ Required.
+ :type parameters: ~azure.mgmt.machinelearningservices.models.IdleShutdownSetting
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def update_idle_shutdown_setting( # pylint: disable=inconsistent-return-statements
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ compute_name: str,
+ parameters: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> None:
+ """Updates the idle shutdown setting of a compute instance.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param compute_name: Name of the Azure Machine Learning compute. Required.
+ :type compute_name: str
+ :param parameters: The object for updating idle shutdown setting of specified ComputeInstance.
+ Required.
+ :type parameters: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def update_idle_shutdown_setting( # pylint: disable=inconsistent-return-statements
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ compute_name: str,
+ parameters: Union[_models.IdleShutdownSetting, IO],
+ **kwargs: Any
+ ) -> None:
+ """Updates the idle shutdown setting of a compute instance.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param compute_name: Name of the Azure Machine Learning compute. Required.
+ :type compute_name: str
+ :param parameters: The object for updating idle shutdown setting of specified ComputeInstance.
+ Is either an IdleShutdownSetting type or an IO type. Required.
+ :type parameters: ~azure.mgmt.machinelearningservices.models.IdleShutdownSetting or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(parameters, (IOBase, bytes)):
+ _content = parameters
+ else:
+ _json = self._serialize.body(parameters, "IdleShutdownSetting")
+
+ request = build_update_idle_shutdown_setting_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ compute_name=compute_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self.update_idle_shutdown_setting.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ update_idle_shutdown_setting.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/updateIdleShutdownSetting"
+ }
+
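
update_idle_shutdown_setting follows the same pattern with an IdleShutdownSetting body. The sketch below uses the typed-model overload and is only illustrative: the idle_time_before_shutdown keyword (assumed to take an ISO 8601 duration) is not defined anywhere in this diff, and the client construction mirrors the earlier sketch.

from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
from azure.mgmt.machinelearningservices.models import IdleShutdownSetting


async def set_idle_shutdown(client: MachineLearningServicesMgmtClient) -> None:
    # idle_time_before_shutdown is assumed to be an ISO 8601 duration string.
    setting = IdleShutdownSetting(idle_time_before_shutdown="PT30M")
    await client.compute.update_idle_shutdown_setting(
        resource_group_name="<resource-group>",
        workspace_name="<workspace>",
        compute_name="<compute-instance>",
        parameters=setting,
    )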
+ @distributed_trace_async
+ async def get_allowed_resize_sizes(
+ self, resource_group_name: str, workspace_name: str, compute_name: str, **kwargs: Any
+ ) -> _models.VirtualMachineSizeListResult:
+ """Returns supported virtual machine sizes for resize.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param compute_name: Name of the Azure Machine Learning compute. Required.
+ :type compute_name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: VirtualMachineSizeListResult or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.VirtualMachineSizeListResult
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.VirtualMachineSizeListResult] = kwargs.pop("cls", None)
+
+ request = build_get_allowed_resize_sizes_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ compute_name=compute_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.get_allowed_resize_sizes.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("VirtualMachineSizeListResult", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ get_allowed_resize_sizes.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/getAllowedVmSizesForResize"
+ }
+
+ async def _resize_initial( # pylint: disable=inconsistent-return-statements
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ compute_name: str,
+ parameters: Union[_models.ResizeSchema, IO],
+ **kwargs: Any
+ ) -> None:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(parameters, (IOBase, bytes)):
+ _content = parameters
+ else:
+ _json = self._serialize.body(parameters, "ResizeSchema")
+
+ request = build_resize_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ compute_name=compute_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._resize_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ _resize_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/resize"
+ }
+
+ @overload
+ async def begin_resize(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ compute_name: str,
+ parameters: _models.ResizeSchema,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Updates the size of a Compute Instance.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param compute_name: Name of the Azure Machine Learning compute. Required.
+ :type compute_name: str
+ :param parameters: The object for updating VM size setting of specified Compute Instance.
+ Required.
+ :type parameters: ~azure.mgmt.machinelearningservices.models.ResizeSchema
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_resize(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ compute_name: str,
+ parameters: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Updates the size of a Compute Instance.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param compute_name: Name of the Azure Machine Learning compute. Required.
+ :type compute_name: str
+ :param parameters: The object for updating VM size setting of specified Compute Instance.
+ Required.
+ :type parameters: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_resize(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ compute_name: str,
+ parameters: Union[_models.ResizeSchema, IO],
+ **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Updates the size of a Compute Instance.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param compute_name: Name of the Azure Machine Learning compute. Required.
+ :type compute_name: str
+ :param parameters: The object for updating VM size setting of specified Compute Instance. Is
+ either a ResizeSchema type or an IO type. Required.
+ :type parameters: ~azure.mgmt.machinelearningservices.models.ResizeSchema or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._resize_initial( # type: ignore
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ compute_name=compute_name,
+ parameters=parameters,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_resize.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/resize"
+ }
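
The resize pair is typically used together: get_allowed_resize_sizes returns the VM sizes the instance can move to, and begin_resize starts a long-running operation driven by the 202 response and its Location header, surfaced as an AsyncLROPoller. A hypothetical sketch under the same client assumptions as the earlier examples; the targetVMSize JSON key is an assumption, since ResizeSchema's fields are not shown in this diff.

import json

from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def resize_compute_instance(client: MachineLearningServicesMgmtClient) -> None:
    sizes = await client.compute.get_allowed_resize_sizes(
        resource_group_name="<resource-group>",
        workspace_name="<workspace>",
        compute_name="<compute-instance>",
    )
    print(sizes)  # VirtualMachineSizeListResult

    # Raw JSON bytes exercise the IO overload of begin_resize; the key below is an assumption.
    payload = json.dumps({"targetVMSize": "Standard_DS3_v2"}).encode("utf-8")
    poller = await client.compute.begin_resize(
        resource_group_name="<resource-group>",
        workspace_name="<workspace>",
        compute_name="<compute-instance>",
        parameters=payload,
    )
    await poller.wait()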
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_data_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_data_versions_operations.py
index 875689bd263c..007f81e226ee 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_data_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_data_versions_operations.py
@@ -70,6 +70,7 @@ def list(
skip: Optional[str] = None,
tags: Optional[str] = None,
list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ stage: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.DataVersionBase"]:
"""List data versions in the data container.
@@ -99,6 +100,8 @@ def list(
ListViewType.All]View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+ :param stage: Data stage. Default value is None.
+ :type stage: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DataVersionBase or the result of cls(response)
:rtype:
@@ -132,6 +135,7 @@ def prepare_request(next_link=None):
skip=skip,
tags=tags,
list_view_type=list_view_type,
+ stage=stage,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_environment_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_environment_versions_operations.py
index 031930c06fc8..ddbf157d22bb 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_environment_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_environment_versions_operations.py
@@ -69,6 +69,7 @@ def list(
top: Optional[int] = None,
skip: Optional[str] = None,
list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ stage: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.EnvironmentVersion"]:
"""List versions.
@@ -91,6 +92,9 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+ :param stage: Stage for including/excluding (for example) archived entities. Takes priority
+ over listViewType. Default value is None.
+ :type stage: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EnvironmentVersion or the result of cls(response)
:rtype:
@@ -123,6 +127,7 @@ def prepare_request(next_link=None):
top=top,
skip=skip,
list_view_type=list_view_type,
+ stage=stage,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
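
Each of these list operations (component, data, and environment versions) gains an optional stage filter that is passed straight through to the request builder. The sketch below is hypothetical and targets one of them; the environment_versions attribute name, the positional parameters ahead of the filters, and the stage value are assumptions not visible in this excerpt.

from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def list_production_environments(client: MachineLearningServicesMgmtClient) -> None:
    # The new stage parameter filters versions server-side; "Production" is a placeholder value.
    async for version in client.environment_versions.list(
        resource_group_name="<resource-group>",
        workspace_name="<workspace>",
        name="<environment-name>",
        stage="Production",
    ):
        print(version)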
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_features_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_features_operations.py
new file mode 100644
index 000000000000..713179b1c8e1
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_features_operations.py
@@ -0,0 +1,269 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar, Union
+import urllib.parse
+
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import AsyncHttpResponse
+from azure.core.rest import HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from ... import models as _models
+from ..._vendor import _convert_request
+from ...operations._features_operations import build_get_request, build_list_request
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class FeaturesOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.aio.MachineLearningServicesMgmtClient`'s
+ :attr:`features` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ featureset_name: str,
+ featureset_version: str,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ feature_name: Optional[str] = None,
+ description: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 1000,
+ **kwargs: Any
+ ) -> AsyncIterable["_models.Feature"]:
+ """List Features.
+
+ List Features.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param featureset_name: Featureset name. This is case-sensitive. Required.
+ :type featureset_name: str
+ :param featureset_version: Featureset Version identifier. This is case-sensitive. Required.
+ :type featureset_version: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :param tags: Comma-separated list of tag names (and optionally values). Example:
+ tag1,tag2=value2. Default value is None.
+ :type tags: str
+ :param feature_name: Feature name. Default value is None.
+ :type feature_name: str
+ :param description: Description of the featureset. Default value is None.
+ :type description: str
+ :param list_view_type: View type for including/excluding (for example) archived entities.
+ Known values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
+ :type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+ :param page_size: Page size. Default value is 1000.
+ :type page_size: int
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either Feature or the result of cls(response)
+ :rtype:
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.Feature]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeatureResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ featureset_name=featureset_name,
+ featureset_version=featureset_version,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ tags=tags,
+ feature_name=feature_name,
+ description=description,
+ list_view_type=list_view_type,
+ page_size=page_size,
+ api_version=api_version,
+ template_url=self.list.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize("FeatureResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
+
+ list.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{featuresetName}/versions/{featuresetVersion}/features"
+ }
+
+ @distributed_trace_async
+ async def get(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ featureset_name: str,
+ featureset_version: str,
+ feature_name: str,
+ **kwargs: Any
+ ) -> _models.Feature:
+ """Get feature.
+
+ Get feature.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param featureset_name: Feature set name. This is case-sensitive. Required.
+ :type featureset_name: str
+ :param featureset_version: Feature set version identifier. This is case-sensitive. Required.
+ :type featureset_version: str
+ :param feature_name: Feature Name. This is case-sensitive. Required.
+ :type feature_name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: Feature or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.Feature
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.Feature] = kwargs.pop("cls", None)
+
+ request = build_get_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ featureset_name=featureset_name,
+ featureset_version=featureset_version,
+ feature_name=feature_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.get.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("Feature", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ get.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{featuresetName}/versions/{featuresetVersion}/features/{featureName}"
+ }
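
FeaturesOperations exposes read-only access to individual features inside a featureset version: list pages through FeatureResourceArmPaginatedResult and get fetches a single feature by name. The features attribute is named in the class docstring above; everything else in this sketch (resource names, versions, the client construction mirroring the earlier examples) is a placeholder or assumption.

from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def show_features(client: MachineLearningServicesMgmtClient) -> None:
    # Iterate the paged result; paging follows next_link with the client's api-version.
    async for feature in client.features.list(
        resource_group_name="<resource-group>",
        workspace_name="<workspace>",
        featureset_name="<featureset>",
        featureset_version="1",
        list_view_type="ActiveOnly",
    ):
        print(feature)

    feature = await client.features.get(
        resource_group_name="<resource-group>",
        workspace_name="<workspace>",
        featureset_name="<featureset>",
        featureset_version="1",
        feature_name="<feature-name>",
    )
    print(feature)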
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_featureset_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_featureset_containers_operations.py
new file mode 100644
index 000000000000..da0306692f1d
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_featureset_containers_operations.py
@@ -0,0 +1,649 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import AsyncHttpResponse
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.core.rest import HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
+
+from ... import models as _models
+from ..._vendor import _convert_request
+from ...operations._featureset_containers_operations import (
+ build_create_or_update_request,
+ build_delete_request,
+ build_get_entity_request,
+ build_list_request,
+)
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class FeaturesetContainersOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.aio.MachineLearningServicesMgmtClient`'s
+ :attr:`featureset_containers` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 20,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ created_by: Optional[str] = None,
+ **kwargs: Any
+ ) -> AsyncIterable["_models.FeaturesetContainer"]:
+ """List featurestore entity containers.
+
+ List featurestore entity containers.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :param tags: Comma-separated list of tag names (and optionally values). Example:
+ tag1,tag2=value2. Default value is None.
+ :type tags: str
+ :param list_view_type: View type for including/excluding (for example) archived entities.
+ Known values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
+ :type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+ :param page_size: Page size. Default value is 20.
+ :type page_size: int
+ :param name: Name of the featureset. Default value is None.
+ :type name: str
+ :param description: Description of the featureset. Default value is None.
+ :type description: str
+ :param created_by: User name of the creator. Default value is None.
+ :type created_by: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either FeaturesetContainer or the result of cls(response)
+ :rtype:
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.FeaturesetContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturesetContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ tags=tags,
+ list_view_type=list_view_type,
+ page_size=page_size,
+ name=name,
+ description=description,
+ created_by=created_by,
+ api_version=api_version,
+ template_url=self.list.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize("FeaturesetContainerResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
+
+ list.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets"
+ }
+
+ async def _delete_initial( # pylint: disable=inconsistent-return-statements
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> None:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self._delete_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202, 204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ _delete_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}"
+ }
+
+ @distributed_trace_async
+ async def begin_delete(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Delete container.
+
+ Delete container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._delete_initial( # type: ignore
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_delete.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}"
+ }
+
+ @distributed_trace_async
+ async def get_entity(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> _models.FeaturesetContainer:
+ """Get container.
+
+ Get container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: FeaturesetContainer or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.FeaturesetContainer
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturesetContainer] = kwargs.pop("cls", None)
+
+ request = build_get_entity_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.get_entity.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("FeaturesetContainer", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ get_entity.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}"
+ }
+
+ async def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.FeaturesetContainer, IO],
+ **kwargs: Any
+ ) -> _models.FeaturesetContainer:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturesetContainer] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "FeaturesetContainer")
+
+ request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._create_or_update_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("FeaturesetContainer", pipeline_response)
+
+ if response.status_code == 201:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Azure-AsyncOperation"] = self._deserialize(
+ "str", response.headers.get("Azure-AsyncOperation")
+ )
+
+ deserialized = self._deserialize("FeaturesetContainer", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ _create_or_update_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}"
+ }
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: _models.FeaturesetContainer,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturesetContainer]:
+ """Create or update container.
+
+ Create or update container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param body: Container entity to create or update. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturesetContainer
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either FeaturesetContainer or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturesetContainer]:
+ """Create or update container.
+
+ Create or update container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param body: Container entity to create or update. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either FeaturesetContainer or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.FeaturesetContainer, IO],
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturesetContainer]:
+ """Create or update container.
+
+ Create or update container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param body: Container entity to create or update. Is either a FeaturesetContainer type or an
+ IO type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturesetContainer or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either FeaturesetContainer or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturesetContainer] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("FeaturesetContainer", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod,
+ AsyncARMPolling(lro_delay, lro_options={"final-state-via": "original-uri"}, **kwargs),
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_create_or_update.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}"
+ }
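
For orientation, here is a minimal usage sketch of the async featureset container operations added above. It assumes the client exposes them through a `featureset_containers` attribute (mirroring the `featureset_versions` attribute documented in the file that follows) and that `FeaturesetContainerProperties` is the generated properties model; subscription, resource group, workspace, and container names are placeholders.

    # Usage sketch (not part of the generated diff). Assumes the async client exposes
    # the featureset container operations as `featureset_containers` and that
    # FeaturesetContainerProperties is the generated properties model.
    import asyncio

    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.machinelearningservices import models
    from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


    async def main() -> None:
        async with DefaultAzureCredential() as credential:
            async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
                # Create or update a featureset container; begin_* returns an AsyncLROPoller.
                poller = await client.featureset_containers.begin_create_or_update(
                    resource_group_name="my-rg",
                    workspace_name="my-workspace",
                    name="my-featureset",
                    body=models.FeaturesetContainer(
                        properties=models.FeaturesetContainerProperties(description="demo container")
                    ),
                )
                await poller.result()

                # Page through the containers in the workspace; list() returns an AsyncItemPaged.
                async for container in client.featureset_containers.list(
                    resource_group_name="my-rg", workspace_name="my-workspace"
                ):
                    print(container.name)

                # Read the container back, then delete it (another long-running operation).
                await client.featureset_containers.get_entity(
                    resource_group_name="my-rg", workspace_name="my-workspace", name="my-featureset"
                )
                delete_poller = await client.featureset_containers.begin_delete(
                    resource_group_name="my-rg", workspace_name="my-workspace", name="my-featureset"
                )
                await delete_poller.result()


    asyncio.run(main())
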
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_featureset_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_featureset_versions_operations.py
new file mode 100644
index 000000000000..b2ebbb208325
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_featureset_versions_operations.py
@@ -0,0 +1,945 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import AsyncHttpResponse
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.core.rest import HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
+
+from ... import models as _models
+from ..._vendor import _convert_request
+from ...operations._featureset_versions_operations import (
+ build_backfill_request,
+ build_create_or_update_request,
+ build_delete_request,
+ build_get_request,
+ build_list_request,
+)
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class FeaturesetVersionsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.aio.MachineLearningServicesMgmtClient`'s
+ :attr:`featureset_versions` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 20,
+ version_name: Optional[str] = None,
+ version: Optional[str] = None,
+ description: Optional[str] = None,
+ created_by: Optional[str] = None,
+ stage: Optional[str] = None,
+ **kwargs: Any
+ ) -> AsyncIterable["_models.FeaturesetVersion"]:
+ """List versions.
+
+ List versions.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Featureset name. This is case-sensitive. Required.
+ :type name: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :param tags: Comma-separated list of tag names (and optionally values). Example:
+ tag1,tag2=value2. Default value is None.
+ :type tags: str
+ :param list_view_type: [ListViewType.ActiveOnly, ListViewType.ArchivedOnly,
+ ListViewType.All] View type for including/excluding (for example) archived entities. Known
+ values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
+ :type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+ :param page_size: Page size. Default value is 20.
+ :type page_size: int
+ :param version_name: Name for the featureset version. Default value is None.
+ :type version_name: str
+ :param version: Featureset version. Default value is None.
+ :type version: str
+ :param description: Description for the feature set version. Default value is None.
+ :type description: str
+ :param created_by: The createdBy user name. Default value is None.
+ :type created_by: str
+ :param stage: Specifies the featurestore stage. Default value is None.
+ :type stage: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator-like instance of either FeaturesetVersion or the result of cls(response)
+ :rtype:
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.FeaturesetVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturesetVersionResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ tags=tags,
+ list_view_type=list_view_type,
+ page_size=page_size,
+ version_name=version_name,
+ version=version,
+ description=description,
+ created_by=created_by,
+ stage=stage,
+ api_version=api_version,
+ template_url=self.list.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize("FeaturesetVersionResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
+
+ list.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions"
+ }
+
+ async def _delete_initial( # pylint: disable=inconsistent-return-statements
+ self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
+ ) -> None:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self._delete_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202, 204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ _delete_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}"
+ }
+
+ @distributed_trace_async
+ async def begin_delete(
+ self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Delete version.
+
+ Delete version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._delete_initial( # type: ignore
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_delete.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}"
+ }
+
+ @distributed_trace_async
+ async def get(
+ self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
+ ) -> _models.FeaturesetVersion:
+ """Get version.
+
+ Get version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: FeaturesetVersion or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.FeaturesetVersion
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturesetVersion] = kwargs.pop("cls", None)
+
+ request = build_get_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.get.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("FeaturesetVersion", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ get.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}"
+ }
+
+ async def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.FeaturesetVersion, IO],
+ **kwargs: Any
+ ) -> _models.FeaturesetVersion:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturesetVersion] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "FeaturesetVersion")
+
+ request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._create_or_update_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("FeaturesetVersion", pipeline_response)
+
+ if response.status_code == 201:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Azure-AsyncOperation"] = self._deserialize(
+ "str", response.headers.get("Azure-AsyncOperation")
+ )
+
+ deserialized = self._deserialize("FeaturesetVersion", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ _create_or_update_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}"
+ }
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: _models.FeaturesetVersion,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturesetVersion]:
+ """Create or update version.
+
+ Create or update version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Version entity to create or update. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturesetVersion
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either FeaturesetVersion or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturesetVersion]:
+ """Create or update version.
+
+ Create or update version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Version entity to create or update. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either FeaturesetVersion or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.FeaturesetVersion, IO],
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturesetVersion]:
+ """Create or update version.
+
+ Create or update version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Version entity to create or update. Is either a FeaturesetVersion type or an IO
+ type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturesetVersion or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either FeaturesetVersion or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturesetVersion] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("FeaturesetVersion", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod,
+ AsyncARMPolling(lro_delay, lro_options={"final-state-via": "original-uri"}, **kwargs),
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_create_or_update.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}"
+ }
+
+ async def _backfill_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.FeaturesetVersionBackfillRequest, IO],
+ **kwargs: Any
+ ) -> Optional[_models.FeaturesetVersionBackfillResponse]:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Optional[_models.FeaturesetVersionBackfillResponse]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "FeaturesetVersionBackfillRequest")
+
+ request = build_backfill_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._backfill_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("FeaturesetVersionBackfillResponse", pipeline_response)
+
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+
+ _backfill_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}/backfill"
+ }
+
+ @overload
+ async def begin_backfill(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: _models.FeaturesetVersionBackfillRequest,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturesetVersionBackfillResponse]:
+ """Backfill.
+
+ Backfill.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Feature set version backfill request entity. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturesetVersionBackfillRequest
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either FeaturesetVersionBackfillResponse or
+ the result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetVersionBackfillResponse]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_backfill(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturesetVersionBackfillResponse]:
+ """Backfill.
+
+ Backfill.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Feature set version backfill request entity. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either FeaturesetVersionBackfillResponse or
+ the result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetVersionBackfillResponse]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_backfill(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.FeaturesetVersionBackfillRequest, IO],
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturesetVersionBackfillResponse]:
+ """Backfill.
+
+ Backfill.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Feature set version backfill request entity. Is either a
+ FeaturesetVersionBackfillRequest type or an IO type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturesetVersionBackfillRequest or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either FeaturesetVersionBackfillResponse or
+ the result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetVersionBackfillResponse]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturesetVersionBackfillResponse] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._backfill_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("FeaturesetVersionBackfillResponse", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_backfill.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}/backfill"
+ }
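
Likewise, a hedged sketch of driving the backfill operation defined above from the async client. The `featureset_versions` attribute is documented in this file; the `display_name` and `feature_window` fields on FeaturesetVersionBackfillRequest, and the FeatureWindow model with `feature_window_start`/`feature_window_end`, are assumptions drawn from the REST schema and should be verified against the generated models.

    # Backfill sketch (not part of the generated diff); model field names below are assumed.
    import asyncio
    from datetime import datetime, timedelta, timezone

    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.machinelearningservices import models
    from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


    async def backfill_last_week() -> None:
        async with DefaultAzureCredential() as credential:
            async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
                now = datetime.now(timezone.utc)
                request = models.FeaturesetVersionBackfillRequest(
                    display_name="weekly-backfill",  # assumed field name
                    feature_window=models.FeatureWindow(  # assumed model/field names
                        feature_window_start=now - timedelta(days=7),
                        feature_window_end=now,
                    ),
                )
                # begin_backfill returns an AsyncLROPoller[FeaturesetVersionBackfillResponse].
                poller = await client.featureset_versions.begin_backfill(
                    resource_group_name="my-rg",
                    workspace_name="my-workspace",
                    name="transactions",
                    version="1",
                    body=request,
                )
                response = await poller.result()
                print(response)  # typically carries the id(s) of the materialization job(s)


    asyncio.run(backfill_last_week())
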
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_featurestore_entity_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_featurestore_entity_containers_operations.py
new file mode 100644
index 000000000000..1a68f75af25d
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_featurestore_entity_containers_operations.py
@@ -0,0 +1,650 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import AsyncHttpResponse
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.core.rest import HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
+
+from ... import models as _models
+from ..._vendor import _convert_request
+from ...operations._featurestore_entity_containers_operations import (
+ build_create_or_update_request,
+ build_delete_request,
+ build_get_entity_request,
+ build_list_request,
+)
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class FeaturestoreEntityContainersOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.aio.MachineLearningServicesMgmtClient`'s
+ :attr:`featurestore_entity_containers` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 20,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ created_by: Optional[str] = None,
+ **kwargs: Any
+ ) -> AsyncIterable["_models.FeaturestoreEntityContainer"]:
+ """List featurestore entity containers.
+
+ List featurestore entity containers.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :param tags: Comma-separated list of tag names (and optionally values). Example:
+ tag1,tag2=value2. Default value is None.
+ :type tags: str
+ :param list_view_type: [ListViewType.ActiveOnly, ListViewType.ArchivedOnly,
+ ListViewType.All] View type for including/excluding (for example) archived entities. Known
+ values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
+ :type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+ :param page_size: Page size. Default value is 20.
+ :type page_size: int
+ :param name: Name for the featurestore entity. Default value is None.
+ :type name: str
+ :param description: Description for the featurestore entity. Default value is None.
+ :type description: str
+ :param created_by: createdBy user name. Default value is None.
+ :type created_by: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either FeaturestoreEntityContainer or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturestoreEntityContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ tags=tags,
+ list_view_type=list_view_type,
+ page_size=page_size,
+ name=name,
+ description=description,
+ created_by=created_by,
+ api_version=api_version,
+ template_url=self.list.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize("FeaturestoreEntityContainerResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
+
+ list.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities"
+ }
+
+ async def _delete_initial( # pylint: disable=inconsistent-return-statements
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> None:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self._delete_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202, 204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ _delete_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}"
+ }
+
+ @distributed_trace_async
+ async def begin_delete(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Delete container.
+
+ Delete container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._delete_initial( # type: ignore
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_delete.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}"
+ }
+
+ @distributed_trace_async
+ async def get_entity(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> _models.FeaturestoreEntityContainer:
+ """Get container.
+
+ Get container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: FeaturestoreEntityContainer or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturestoreEntityContainer] = kwargs.pop("cls", None)
+
+ request = build_get_entity_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.get_entity.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("FeaturestoreEntityContainer", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ get_entity.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}"
+ }
+
+ async def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.FeaturestoreEntityContainer, IO],
+ **kwargs: Any
+ ) -> _models.FeaturestoreEntityContainer:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturestoreEntityContainer] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "FeaturestoreEntityContainer")
+
+ request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._create_or_update_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("FeaturestoreEntityContainer", pipeline_response)
+
+ if response.status_code == 201:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Azure-AsyncOperation"] = self._deserialize(
+ "str", response.headers.get("Azure-AsyncOperation")
+ )
+
+ deserialized = self._deserialize("FeaturestoreEntityContainer", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ _create_or_update_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}"
+ }
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: _models.FeaturestoreEntityContainer,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturestoreEntityContainer]:
+ """Create or update container.
+
+ Create or update container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param body: Container entity to create or update. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either FeaturestoreEntityContainer or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturestoreEntityContainer]:
+ """Create or update container.
+
+ Create or update container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param body: Container entity to create or update. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either FeaturestoreEntityContainer or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.FeaturestoreEntityContainer, IO],
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturestoreEntityContainer]:
+ """Create or update container.
+
+ Create or update container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param body: Container entity to create or update. Is either a FeaturestoreEntityContainer type
+ or an IO type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either FeaturestoreEntityContainer or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturestoreEntityContainer] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("FeaturestoreEntityContainer", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod,
+ AsyncARMPolling(lro_delay, lro_options={"final-state-via": "original-uri"}, **kwargs),
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_create_or_update.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}"
+ }
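
A brief sketch of exercising the new featurestore_entity_containers operation group from the async client (the attribute name is the one given in the class docstring above); the subscription, resource group, workspace, and container names are placeholders.

import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def list_entity_containers() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # Page through active containers in the workspace (page_size defaults to 20).
            async for container in client.featurestore_entity_containers.list(
                resource_group_name="my-rg",
                workspace_name="my-ws",
                list_view_type="ActiveOnly",
            ):
                print(container.name)

            # Fetch a single container by name.
            entity = await client.featurestore_entity_containers.get_entity(
                resource_group_name="my-rg",
                workspace_name="my-ws",
                name="my-entity",
            )
            print(entity.id)


asyncio.run(list_entity_containers())
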
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_featurestore_entity_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_featurestore_entity_versions_operations.py
new file mode 100644
index 000000000000..65885ab1ab71
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_featurestore_entity_versions_operations.py
@@ -0,0 +1,681 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import AsyncHttpResponse
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.core.rest import HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
+
+from ... import models as _models
+from ..._vendor import _convert_request
+from ...operations._featurestore_entity_versions_operations import (
+ build_create_or_update_request,
+ build_delete_request,
+ build_get_request,
+ build_list_request,
+)
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class FeaturestoreEntityVersionsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.aio.MachineLearningServicesMgmtClient`'s
+ :attr:`featurestore_entity_versions` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 20,
+ version_name: Optional[str] = None,
+ version: Optional[str] = None,
+ description: Optional[str] = None,
+ created_by: Optional[str] = None,
+ stage: Optional[str] = None,
+ **kwargs: Any
+ ) -> AsyncIterable["_models.FeaturestoreEntityVersion"]:
+ """List versions.
+
+ List versions.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Feature entity name. This is case-sensitive. Required.
+ :type name: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :param tags: Comma-separated list of tag names (and optionally values). Example:
+ tag1,tag2=value2. Default value is None.
+ :type tags: str
+ :param list_view_type: [ListViewType.ActiveOnly, ListViewType.ArchivedOnly,
+ ListViewType.All] View type for including/excluding (for example) archived entities. Known
+ values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
+ :type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+ :param page_size: Page size. Default value is 20.
+ :type page_size: int
+ :param version_name: Name for the featurestore entity version. Default value is None.
+ :type version_name: str
+ :param version: Featurestore entity version. Default value is None.
+ :type version: str
+ :param description: Description for the feature entity version. Default value is None.
+ :type description: str
+ :param created_by: createdBy user name. Default value is None.
+ :type created_by: str
+ :param stage: Specifies the featurestore stage. Default value is None.
+ :type stage: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either FeaturestoreEntityVersion or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturestoreEntityVersionResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ tags=tags,
+ list_view_type=list_view_type,
+ page_size=page_size,
+ version_name=version_name,
+ version=version,
+ description=description,
+ created_by=created_by,
+ stage=stage,
+ api_version=api_version,
+ template_url=self.list.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize("FeaturestoreEntityVersionResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
+
+ list.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}/versions"
+ }
+
+ async def _delete_initial( # pylint: disable=inconsistent-return-statements
+ self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
+ ) -> None:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self._delete_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202, 204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ _delete_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}/versions/{version}"
+ }
+
+ @distributed_trace_async
+ async def begin_delete(
+ self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Delete version.
+
+ Delete version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._delete_initial( # type: ignore
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_delete.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}/versions/{version}"
+ }
+
+ @distributed_trace_async
+ async def get(
+ self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
+ ) -> _models.FeaturestoreEntityVersion:
+ """Get version.
+
+ Get version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: FeaturestoreEntityVersion or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturestoreEntityVersion] = kwargs.pop("cls", None)
+
+ request = build_get_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.get.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("FeaturestoreEntityVersion", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ get.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}/versions/{version}"
+ }
+
+ async def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.FeaturestoreEntityVersion, IO],
+ **kwargs: Any
+ ) -> _models.FeaturestoreEntityVersion:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturestoreEntityVersion] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "FeaturestoreEntityVersion")
+
+ request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._create_or_update_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("FeaturestoreEntityVersion", pipeline_response)
+
+ if response.status_code == 201:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Azure-AsyncOperation"] = self._deserialize(
+ "str", response.headers.get("Azure-AsyncOperation")
+ )
+
+ deserialized = self._deserialize("FeaturestoreEntityVersion", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ _create_or_update_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}/versions/{version}"
+ }
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: _models.FeaturestoreEntityVersion,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturestoreEntityVersion]:
+ """Create or update version.
+
+ Create or update version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Version entity to create or update. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either FeaturestoreEntityVersion or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturestoreEntityVersion]:
+ """Create or update version.
+
+ Create or update version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Version entity to create or update. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either FeaturestoreEntityVersion or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.FeaturestoreEntityVersion, IO],
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturestoreEntityVersion]:
+ """Create or update version.
+
+ Create or update version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Version entity to create or update. Is either a FeaturestoreEntityVersion type or
+ an IO type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either FeaturestoreEntityVersion or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturestoreEntityVersion] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("FeaturestoreEntityVersion", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod,
+ AsyncARMPolling(lro_delay, lro_options={"final-state-via": "original-uri"}, **kwargs),
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_create_or_update.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}/versions/{version}"
+ }
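
A short sketch of the featurestore_entity_versions operation group added above, showing a get followed by a long-running delete; the attribute name comes from the class docstring, and the resource identifiers are placeholders.

import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def get_then_delete_version() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # Read a specific entity version.
            version = await client.featurestore_entity_versions.get(
                resource_group_name="my-rg",
                workspace_name="my-ws",
                name="my-entity",
                version="1",
            )
            print(version.id)

            # Long-running delete; result() returns None on completion.
            poller = await client.featurestore_entity_versions.begin_delete(
                resource_group_name="my-rg",
                workspace_name="my-ws",
                name="my-entity",
                version="1",
            )
            await poller.result()


asyncio.run(get_then_delete_version())
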
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_jobs_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_jobs_operations.py
index 8196b0804d30..316f54fb9024 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_jobs_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_jobs_operations.py
@@ -37,6 +37,7 @@
build_delete_request,
build_get_request,
build_list_request,
+ build_update_request,
)
T = TypeVar("T")
@@ -71,6 +72,10 @@ def list(
job_type: Optional[str] = None,
tag: Optional[str] = None,
list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ asset_name: Optional[str] = None,
+ scheduled: Optional[bool] = None,
+ schedule_id: Optional[str] = None,
+ properties: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.JobBase"]:
"""Lists Jobs in the workspace.
@@ -91,6 +96,15 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+ :param asset_name: Asset name that the job's named output is registered with. Default value is None.
+ :type asset_name: str
+ :param scheduled: Indicates whether the job is a scheduled job. Default value is None.
+ :type scheduled: bool
+ :param schedule_id: The id of the schedule from which the listed jobs were triggered. Default value is None.
+ :type schedule_id: str
+ :param properties: Comma-separated list of property names (and optionally values). Example:
+ prop1,prop2=value2. Default value is None.
+ :type properties: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either JobBase or the result of cls(response)
:rtype:
@@ -122,6 +136,10 @@ def prepare_request(next_link=None):
job_type=job_type,
tag=tag,
list_view_type=list_view_type,
+ asset_name=asset_name,
+ scheduled=scheduled,
+ schedule_id=schedule_id,
+ properties=properties,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
@@ -375,6 +393,165 @@ async def get(self, resource_group_name: str, workspace_name: str, id: str, **kw
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/jobs/{id}"
}
+ @overload
+ async def update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ id: str,
+ body: _models.PartialJobBasePartialResource,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.JobBase:
+ """Updates a Job.
+
+ Updates a Job.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param id: The name and identifier for the Job. This is case-sensitive. Required.
+ :type id: str
+ :param body: Job definition to apply during the operation. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.PartialJobBasePartialResource
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: JobBase or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.JobBase
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ id: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.JobBase:
+ """Updates a Job.
+
+ Updates a Job.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param id: The name and identifier for the Job. This is case-sensitive. Required.
+ :type id: str
+ :param body: Job definition to apply during the operation. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: JobBase or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.JobBase
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ id: str,
+ body: Union[_models.PartialJobBasePartialResource, IO],
+ **kwargs: Any
+ ) -> _models.JobBase:
+ """Updates a Job.
+
+ Updates a Job.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param id: The name and identifier for the Job. This is case-sensitive. Required.
+ :type id: str
+ :param body: Job definition to apply during the operation. Is either a
+         PartialJobBasePartialResource type or an IO type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.PartialJobBasePartialResource or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: JobBase or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.JobBase
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.JobBase] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "PartialJobBasePartialResource")
+
+ request = build_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ id=id,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self.update.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("JobBase", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ update.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/jobs/{id}"
+ }
+
@overload
async def create_or_update(
self,
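A minimal usage sketch for the new async `update` method and the extended `list` filters added in this file, assuming placeholder resource names, the `jobs` attribute name, and azure-identity for the credential; the empty PartialJobBasePartialResource is illustrative only.

import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices import models
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # Partial update of an existing job; set the fields to patch on the
            # PartialJobBasePartialResource model (left empty here for illustration).
            updated = await client.jobs.update(  # assumed attribute name
                resource_group_name="<resource-group>",
                workspace_name="<workspace>",
                id="<job-id>",
                body=models.PartialJobBasePartialResource(),
            )
            print(updated.name)

            # list now accepts the extra filters added above (asset_name, scheduled,
            # schedule_id, properties); only two are shown here.
            async for job in client.jobs.list(
                resource_group_name="<resource-group>",
                workspace_name="<workspace>",
                scheduled=True,
                properties="prop1,prop2=value2",
            ):
                print(job.name)


asyncio.run(main())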
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_labeling_jobs_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_labeling_jobs_operations.py
new file mode 100644
index 000000000000..32bb9cf7a148
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_labeling_jobs_operations.py
@@ -0,0 +1,1027 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import AsyncHttpResponse
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.core.rest import HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
+
+from ... import models as _models
+from ..._vendor import _convert_request
+from ...operations._labeling_jobs_operations import (
+ build_create_or_update_request,
+ build_delete_request,
+ build_export_labels_request,
+ build_get_request,
+ build_list_request,
+ build_pause_request,
+ build_resume_request,
+)
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class LabelingJobsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.aio.MachineLearningServicesMgmtClient`'s
+ :attr:`labeling_jobs` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ skip: Optional[str] = None,
+ top: Optional[int] = None,
+ **kwargs: Any
+ ) -> AsyncIterable["_models.LabelingJob"]:
+ """Lists labeling jobs in the workspace.
+
+ Lists labeling jobs in the workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :param top: Number of labeling jobs to return. Default value is None.
+ :type top: int
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either LabelingJob or the result of cls(response)
+ :rtype:
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.LabelingJob]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.LabelingJobResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ top=top,
+ api_version=api_version,
+ template_url=self.list.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize("LabelingJobResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
+
+ list.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs"
+ }
+
+ @distributed_trace_async
+ async def delete( # pylint: disable=inconsistent-return-statements
+ self, resource_group_name: str, workspace_name: str, id: str, **kwargs: Any
+ ) -> None:
+ """Delete a labeling job.
+
+ Delete a labeling job.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param id: The name and identifier for the LabelingJob. Required.
+ :type id: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ id=id,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.delete.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ delete.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}"
+ }
+
+ @distributed_trace_async
+ async def get(self, resource_group_name: str, workspace_name: str, id: str, **kwargs: Any) -> _models.LabelingJob:
+ """Gets a labeling job by name/id.
+
+ Gets a labeling job by name/id.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param id: The name and identifier for the LabelingJob. Required.
+ :type id: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: LabelingJob or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.LabelingJob
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.LabelingJob] = kwargs.pop("cls", None)
+
+ request = build_get_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ id=id,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.get.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("LabelingJob", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ get.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}"
+ }
+
+ async def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ id: str,
+ body: Union[_models.LabelingJob, IO],
+ **kwargs: Any
+ ) -> _models.LabelingJob:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.LabelingJob] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "LabelingJob")
+
+ request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ id=id,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._create_or_update_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("LabelingJob", pipeline_response)
+
+ if response.status_code == 201:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Azure-AsyncOperation"] = self._deserialize(
+ "str", response.headers.get("Azure-AsyncOperation")
+ )
+
+ deserialized = self._deserialize("LabelingJob", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ _create_or_update_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}"
+ }
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ id: str,
+ body: _models.LabelingJob,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.LabelingJob]:
+ """Creates or updates a labeling job (asynchronous).
+
+ Creates or updates a labeling job (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param id: The name and identifier for the LabelingJob. Required.
+ :type id: str
+ :param body: LabelingJob definition object. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.LabelingJob
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either LabelingJob or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.LabelingJob]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ id: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.LabelingJob]:
+ """Creates or updates a labeling job (asynchronous).
+
+ Creates or updates a labeling job (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param id: The name and identifier for the LabelingJob. Required.
+ :type id: str
+ :param body: LabelingJob definition object. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either LabelingJob or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.LabelingJob]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ id: str,
+ body: Union[_models.LabelingJob, IO],
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.LabelingJob]:
+ """Creates or updates a labeling job (asynchronous).
+
+ Creates or updates a labeling job (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param id: The name and identifier for the LabelingJob. Required.
+ :type id: str
+        :param body: LabelingJob definition object. Is either a LabelingJob type or an IO type.
+ Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.LabelingJob or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either LabelingJob or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.LabelingJob]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.LabelingJob] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ id=id,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("LabelingJob", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod,
+ AsyncARMPolling(lro_delay, lro_options={"final-state-via": "original-uri"}, **kwargs),
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_create_or_update.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}"
+ }
+
+ async def _export_labels_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ id: str,
+ body: Union[_models.ExportSummary, IO],
+ **kwargs: Any
+ ) -> Optional[_models.ExportSummary]:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Optional[_models.ExportSummary]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "ExportSummary")
+
+ request = build_export_labels_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ id=id,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._export_labels_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("ExportSummary", pipeline_response)
+
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+
+ _export_labels_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}/exportLabels"
+ }
+
+ @overload
+ async def begin_export_labels(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ id: str,
+ body: _models.ExportSummary,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ExportSummary]:
+ """Export labels from a labeling job (asynchronous). Using the URL in the Location header, the
+ status of the job export operation can be tracked.
+
+ Export labels from a labeling job (asynchronous). Using the URL in the Location header, the
+ status of the job export operation can be tracked.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param id: The name and identifier for the LabelingJob. Required.
+ :type id: str
+ :param body: The export summary. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ExportSummary
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either ExportSummary or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.ExportSummary]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_export_labels(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ id: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ExportSummary]:
+ """Export labels from a labeling job (asynchronous). Using the URL in the Location header, the
+ status of the job export operation can be tracked.
+
+ Export labels from a labeling job (asynchronous). Using the URL in the Location header, the
+ status of the job export operation can be tracked.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param id: The name and identifier for the LabelingJob. Required.
+ :type id: str
+ :param body: The export summary. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either ExportSummary or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.ExportSummary]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_export_labels(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ id: str,
+ body: Union[_models.ExportSummary, IO],
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ExportSummary]:
+ """Export labels from a labeling job (asynchronous). Using the URL in the Location header, the
+ status of the job export operation can be tracked.
+
+ Export labels from a labeling job (asynchronous). Using the URL in the Location header, the
+ status of the job export operation can be tracked.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param id: The name and identifier for the LabelingJob. Required.
+ :type id: str
+        :param body: The export summary. Is either an ExportSummary type or an IO type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ExportSummary or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either ExportSummary or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.ExportSummary]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.ExportSummary] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._export_labels_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ id=id,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("ExportSummary", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_export_labels.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}/exportLabels"
+ }
+
+ @distributed_trace_async
+ async def pause(
+ self, resource_group_name: str, workspace_name: str, id: str, **kwargs: Any
+ ) -> _models.LabelingJobProperties:
+ """Pause a labeling job.
+
+ Pause a labeling job.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param id: The name and identifier for the LabelingJob. Required.
+ :type id: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: LabelingJobProperties or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.LabelingJobProperties
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.LabelingJobProperties] = kwargs.pop("cls", None)
+
+ request = build_pause_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ id=id,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.pause.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("LabelingJobProperties", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ pause.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}/pause"
+ }
+
+ async def _resume_initial(
+ self, resource_group_name: str, workspace_name: str, id: str, **kwargs: Any
+ ) -> Optional[_models.LabelingJobProperties]:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[Optional[_models.LabelingJobProperties]] = kwargs.pop("cls", None)
+
+ request = build_resume_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ id=id,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self._resume_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("LabelingJobProperties", pipeline_response)
+
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+
+ _resume_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}/resume"
+ }
+
+ @distributed_trace_async
+ async def begin_resume(
+ self, resource_group_name: str, workspace_name: str, id: str, **kwargs: Any
+ ) -> AsyncLROPoller[_models.LabelingJobProperties]:
+ """Resume a labeling job (asynchronous).
+
+ Resume a labeling job (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param id: The name and identifier for the LabelingJob. Required.
+ :type id: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either LabelingJobProperties or the result
+ of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.LabelingJobProperties]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.LabelingJobProperties] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._resume_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ id=id,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("LabelingJobProperties", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_resume.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}/resume"
+ }
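A minimal usage sketch for the labeling-job operations defined in this new file (list, pause, and the begin_resume poller), assuming placeholder resource names and azure-identity for the credential.

import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # Enumerate labeling jobs; paging is handled by the AsyncItemPaged returned by list().
            async for job in client.labeling_jobs.list(
                resource_group_name="<resource-group>", workspace_name="<workspace>", top=10
            ):
                print(job.name)

            # pause is a plain call; resume is a long-running operation returning a poller.
            paused = await client.labeling_jobs.pause(
                resource_group_name="<resource-group>", workspace_name="<workspace>", id="<labeling-job-id>"
            )
            print(paused.status)

            poller = await client.labeling_jobs.begin_resume(
                resource_group_name="<resource-group>", workspace_name="<workspace>", id="<labeling-job-id>"
            )
            resumed = await poller.result()
            print(resumed.status)


asyncio.run(main())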
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_managed_network_provisions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_managed_network_provisions_operations.py
new file mode 100644
index 000000000000..c925e1e999d2
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_managed_network_provisions_operations.py
@@ -0,0 +1,299 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import AsyncHttpResponse
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.core.rest import HttpRequest
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
+
+from ... import models as _models
+from ..._vendor import _convert_request
+from ...operations._managed_network_provisions_operations import build_provision_managed_network_request
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class ManagedNetworkProvisionsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.aio.MachineLearningServicesMgmtClient`'s
+ :attr:`managed_network_provisions` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ async def _provision_managed_network_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ body: Optional[Union[_models.ManagedNetworkProvisionOptions, IO]] = None,
+ **kwargs: Any
+ ) -> Optional[_models.ManagedNetworkProvisionStatus]:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Optional[_models.ManagedNetworkProvisionStatus]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ if body is not None:
+ _json = self._serialize.body(body, "ManagedNetworkProvisionOptions")
+ else:
+ _json = None
+
+ request = build_provision_managed_network_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._provision_managed_network_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("ManagedNetworkProvisionStatus", pipeline_response)
+
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+
+ _provision_managed_network_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/provisionManagedNetwork"
+ }
+
+ @overload
+ async def begin_provision_managed_network(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ body: Optional[_models.ManagedNetworkProvisionOptions] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ManagedNetworkProvisionStatus]:
+ """Provisions the managed network of a machine learning workspace.
+
+ Provisions the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param body: Managed Network Provisioning Options for a machine learning workspace. Default
+ value is None.
+ :type body: ~azure.mgmt.machinelearningservices.models.ManagedNetworkProvisionOptions
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either ManagedNetworkProvisionStatus or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.ManagedNetworkProvisionStatus]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_provision_managed_network(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ body: Optional[IO] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ManagedNetworkProvisionStatus]:
+ """Provisions the managed network of a machine learning workspace.
+
+ Provisions the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param body: Managed Network Provisioning Options for a machine learning workspace. Default
+ value is None.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either ManagedNetworkProvisionStatus or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.ManagedNetworkProvisionStatus]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_provision_managed_network(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ body: Optional[Union[_models.ManagedNetworkProvisionOptions, IO]] = None,
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ManagedNetworkProvisionStatus]:
+ """Provisions the managed network of a machine learning workspace.
+
+ Provisions the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param body: Managed Network Provisioning Options for a machine learning workspace. Is either a
+         ManagedNetworkProvisionOptions type or an IO type. Default value is None.
+ :type body: ~azure.mgmt.machinelearningservices.models.ManagedNetworkProvisionOptions or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either ManagedNetworkProvisionStatus or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.ManagedNetworkProvisionStatus]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.ManagedNetworkProvisionStatus] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._provision_managed_network_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("ManagedNetworkProvisionStatus", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_provision_managed_network.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/provisionManagedNetwork"
+ }
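
A minimal, hedged usage sketch of the new async provisioning LRO (not part of the generated diff). The `managed_network_provisions` operation-group attribute, the placeholder resource names, and the subscription ID are assumptions to verify against the regenerated client; the optional `body` is omitted since it defaults to None.

    # Hedged sketch: start the managed network provisioning LRO and wait for the
    # terminal ManagedNetworkProvisionStatus. The operation-group attribute name
    # and all resource names below are assumptions.
    import asyncio

    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


    async def provision_managed_network() -> None:
        async with DefaultAzureCredential() as credential:
            async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
                poller = await client.managed_network_provisions.begin_provision_managed_network(
                    resource_group_name="my-rg",
                    workspace_name="my-workspace",
                )
                status = await poller.result()
                print(status)


    asyncio.run(provision_managed_network())
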
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_managed_network_settings_rule_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_managed_network_settings_rule_operations.py
new file mode 100644
index 000000000000..46cfb3f2e11b
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_managed_network_settings_rule_operations.py
@@ -0,0 +1,609 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import AsyncHttpResponse
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.core.rest import HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
+
+from ... import models as _models
+from ..._vendor import _convert_request
+from ...operations._managed_network_settings_rule_operations import (
+ build_create_or_update_request,
+ build_delete_request,
+ build_get_request,
+ build_list_request,
+)
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class ManagedNetworkSettingsRuleOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.aio.MachineLearningServicesMgmtClient`'s
+ :attr:`managed_network_settings_rule` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self, resource_group_name: str, workspace_name: str, **kwargs: Any
+ ) -> AsyncIterable["_models.OutboundRuleBasicResource"]:
+ """Lists the managed network outbound rules for a machine learning workspace.
+
+ Lists the managed network outbound rules for a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either OutboundRuleBasicResource or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.OutboundRuleListResult] = kwargs.pop("cls", None)
+
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.list.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize("OutboundRuleListResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
+
+ list.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundRules"
+ }
+
+ async def _delete_initial( # pylint: disable=inconsistent-return-statements
+ self, resource_group_name: str, workspace_name: str, rule_name: str, **kwargs: Any
+ ) -> None:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ rule_name=rule_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self._delete_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202, 204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ _delete_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundRules/{ruleName}"
+ }
+
+ @distributed_trace_async
+ async def begin_delete(
+ self, resource_group_name: str, workspace_name: str, rule_name: str, **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Deletes an outbound rule from the managed network of a machine learning workspace.
+
+ Deletes an outbound rule from the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param rule_name: Name of the workspace managed network outbound rule. Required.
+ :type rule_name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._delete_initial( # type: ignore
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ rule_name=rule_name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_delete.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundRules/{ruleName}"
+ }
+
+ @distributed_trace_async
+ async def get(
+ self, resource_group_name: str, workspace_name: str, rule_name: str, **kwargs: Any
+ ) -> _models.OutboundRuleBasicResource:
+ """Gets an outbound rule from the managed network of a machine learning workspace.
+
+ Gets an outbound rule from the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param rule_name: Name of the workspace managed network outbound rule. Required.
+ :type rule_name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: OutboundRuleBasicResource or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.OutboundRuleBasicResource] = kwargs.pop("cls", None)
+
+ request = build_get_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ rule_name=rule_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.get.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("OutboundRuleBasicResource", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ get.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundRules/{ruleName}"
+ }
+
+ async def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ rule_name: str,
+ body: Union[_models.OutboundRuleBasicResource, IO],
+ **kwargs: Any
+ ) -> Optional[_models.OutboundRuleBasicResource]:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Optional[_models.OutboundRuleBasicResource]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "OutboundRuleBasicResource")
+
+ request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ rule_name=rule_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._create_or_update_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("OutboundRuleBasicResource", pipeline_response)
+
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+
+ _create_or_update_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundRules/{ruleName}"
+ }
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ rule_name: str,
+ body: _models.OutboundRuleBasicResource,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.OutboundRuleBasicResource]:
+ """Creates or updates an outbound rule in the managed network of a machine learning workspace.
+
+ Creates or updates an outbound rule in the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param rule_name: Name of the workspace managed network outbound rule. Required.
+ :type rule_name: str
+ :param body: Outbound Rule to be created or updated in the managed network of a machine
+ learning workspace. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either OutboundRuleBasicResource or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ rule_name: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.OutboundRuleBasicResource]:
+ """Creates or updates an outbound rule in the managed network of a machine learning workspace.
+
+ Creates or updates an outbound rule in the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param rule_name: Name of the workspace managed network outbound rule. Required.
+ :type rule_name: str
+ :param body: Outbound Rule to be created or updated in the managed network of a machine
+ learning workspace. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either OutboundRuleBasicResource or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ rule_name: str,
+ body: Union[_models.OutboundRuleBasicResource, IO],
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.OutboundRuleBasicResource]:
+ """Creates or updates an outbound rule in the managed network of a machine learning workspace.
+
+ Creates or updates an outbound rule in the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param rule_name: Name of the workspace managed network outbound rule. Required.
+ :type rule_name: str
+ :param body: Outbound Rule to be created or updated in the managed network of a machine
+ learning workspace. Is either an OutboundRuleBasicResource type or an IO type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either OutboundRuleBasicResource or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.OutboundRuleBasicResource] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ rule_name=rule_name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("OutboundRuleBasicResource", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_create_or_update.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundRules/{ruleName}"
+ }
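
A hedged sketch of calling the new ManagedNetworkSettingsRuleOperations group from the async client. The `managed_network_settings_rule` attribute name comes from the class docstring above; the resource group, workspace, and rule names are placeholders, and `client` stands for an already-authenticated aio MachineLearningServicesMgmtClient.

    # Hedged sketch: page through outbound rules, then delete one via the LRO.
    async def show_and_delete_outbound_rules(client) -> None:
        # list() returns an AsyncItemPaged of OutboundRuleBasicResource.
        async for rule in client.managed_network_settings_rule.list(
            resource_group_name="my-rg",
            workspace_name="my-workspace",
        ):
            print(rule.name)

        # begin_delete() returns an AsyncLROPoller[None]; result() waits for completion.
        poller = await client.managed_network_settings_rule.begin_delete(
            resource_group_name="my-rg",
            workspace_name="my-workspace",
            rule_name="my-outbound-rule",
        )
        await poller.result()
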
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_model_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_model_versions_operations.py
index 8d601cec0072..f26a3b34c85e 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_model_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_model_versions_operations.py
@@ -7,7 +7,7 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
+from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -21,11 +21,13 @@
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
@@ -34,6 +36,7 @@
build_delete_request,
build_get_request,
build_list_request,
+ build_package_request,
)
T = TypeVar("T")
@@ -75,6 +78,7 @@ def list(
properties: Optional[str] = None,
feed: Optional[str] = None,
list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ stage: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.ModelVersion"]:
"""List model versions.
@@ -111,6 +115,8 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+ :param stage: Model stage. Default value is None.
+ :type stage: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ModelVersion or the result of cls(response)
:rtype:
@@ -149,6 +155,7 @@ def prepare_request(next_link=None):
properties=properties,
feed=feed,
list_view_type=list_view_type,
+ stage=stage,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
@@ -518,3 +525,267 @@ async def create_or_update(
create_or_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/models/{name}/versions/{version}"
}
+
+ async def _package_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.PackageRequest, IO],
+ **kwargs: Any
+ ) -> Optional[_models.PackageResponse]:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Optional[_models.PackageResponse]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "PackageRequest")
+
+ request = build_package_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._package_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("PackageResponse", pipeline_response)
+
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+
+ _package_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/models/{name}/versions/{version}/package"
+ }
+
+ @overload
+ async def begin_package(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: _models.PackageRequest,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.PackageResponse]:
+ """Model Version Package operation.
+
+ Model Version Package operation.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Package operation request body. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.PackageRequest
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either PackageResponse or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.PackageResponse]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_package(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.PackageResponse]:
+ """Model Version Package operation.
+
+ Model Version Package operation.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Package operation request body. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either PackageResponse or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.PackageResponse]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_package(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.PackageRequest, IO],
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.PackageResponse]:
+ """Model Version Package operation.
+
+ Model Version Package operation.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Package operation request body. Is either a PackageRequest type or an IO type.
+ Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.PackageRequest or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either PackageResponse or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.PackageResponse]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.PackageResponse] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._package_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("PackageResponse", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_package.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/models/{name}/versions/{version}/package"
+ }
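
A hedged sketch of the new `stage` list filter and the `begin_package` LRO on the async `model_versions` group. Per the generated `_package_initial`, the body may be a PackageRequest model or raw bytes/IO; the JSON field names in the payload below (and every resource name) are assumptions for the 2023-08-01-preview wire format and should be checked against `_models.PackageRequest`.

    # Hedged sketch: filter model versions by an assumed "Production" stage, then
    # request packaging with a raw JSON body (accepted because the initial call
    # treats bytes/IOBase input as pre-serialized content).
    import json

    async def package_latest_production_model(client) -> None:
        async for version in client.model_versions.list(
            resource_group_name="my-rg",
            workspace_name="my-workspace",
            name="my-model",
            stage="Production",  # new optional filter in this api-version
        ):
            print(version.name)

        body = json.dumps(
            {
                "targetEnvironmentId": "<environment-arm-id>",         # assumed field name
                "inferencingServer": {"serverType": "AzureMLOnline"},  # assumed field name/value
            }
        ).encode("utf-8")
        poller = await client.model_versions.begin_package(
            resource_group_name="my-rg",
            workspace_name="my-workspace",
            name="my-model",
            version="1",
            body=body,
        )
        print(await poller.result())  # PackageResponse on success
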
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_operations.py
index 3edc6c4bb6de..b04a2f715594 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_operations.py
@@ -53,20 +53,22 @@ def __init__(self, *args, **kwargs) -> None:
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
- def list(self, **kwargs: Any) -> AsyncIterable["_models.AmlOperation"]:
+ def list(self, **kwargs: Any) -> AsyncIterable["_models.Operation"]:
"""Lists all of the available Azure Machine Learning Workspaces REST API operations.
+ Lists all of the available Azure Machine Learning Workspaces REST API operations.
+
:keyword callable cls: A custom type or function that will be passed the direct response
- :return: An iterator like instance of either AmlOperation or the result of cls(response)
+ :return: An iterator like instance of either Operation or the result of cls(response)
:rtype:
- ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.AmlOperation]
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.Operation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[_models.AmlOperationListResult] = kwargs.pop("cls", None)
+ cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
@@ -107,7 +109,7 @@ def prepare_request(next_link=None):
return request
async def extract_data(pipeline_response):
- deserialized = self._deserialize("AmlOperationListResult", pipeline_response)
+ deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
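
Because the operations list now deserializes into Operation/OperationListResult rather than AmlOperation/AmlOperationListResult, callers that imported the old model names need to update. A minimal hedged sketch of the paged call, with `client` standing for an authenticated aio MachineLearningServicesMgmtClient:

    # Hedged sketch: iterate the renamed Operation models. Only `name` is relied
    # on here; it follows the standard ARM operation shape.
    async def dump_operations(client) -> None:
        async for operation in client.operations.list():
            print(operation.name)
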
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_private_endpoint_connections_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_private_endpoint_connections_operations.py
index 7a7f0546597b..81f0b683eea2 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_private_endpoint_connections_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_private_endpoint_connections_operations.py
@@ -63,12 +63,14 @@ def __init__(self, *args, **kwargs) -> None:
def list(
self, resource_group_name: str, workspace_name: str, **kwargs: Any
) -> AsyncIterable["_models.PrivateEndpointConnection"]:
- """List all the private endpoint connections associated with the workspace.
+ """Called by end-users to get all PE connections.
+
+ Called by end-users to get all PE connections.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateEndpointConnection or the result of
@@ -153,19 +155,86 @@ async def get_next(next_link=None):
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/privateEndpointConnections"
}
+ @distributed_trace_async
+ async def delete( # pylint: disable=inconsistent-return-statements
+ self, resource_group_name: str, workspace_name: str, private_endpoint_connection_name: str, **kwargs: Any
+ ) -> None:
+ """Called by end-users to delete a PE connection.
+
+ Called by end-users to delete a PE connection.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param private_endpoint_connection_name: NRP Private Endpoint Connection Name. Required.
+ :type private_endpoint_connection_name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ private_endpoint_connection_name=private_endpoint_connection_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.delete.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ delete.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}"
+ }
+
@distributed_trace_async
async def get(
self, resource_group_name: str, workspace_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> _models.PrivateEndpointConnection:
- """Gets the specified private endpoint connection associated with the workspace.
+ """Called by end-users to get a PE connection.
+
+ Called by end-users to get a PE connection.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
- :param private_endpoint_connection_name: The name of the private endpoint connection associated
- with the workspace. Required.
+ :param private_endpoint_connection_name: NRP Private Endpoint Connection Name. Required.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection or the result of cls(response)
@@ -228,23 +297,26 @@ async def create_or_update(
resource_group_name: str,
workspace_name: str,
private_endpoint_connection_name: str,
- properties: _models.PrivateEndpointConnection,
+ body: _models.PrivateEndpointConnection,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.PrivateEndpointConnection:
- """Update the state of specified private endpoint connection associated with the workspace.
+ """Called by end-users to approve or reject a PE connection.
+ This method must validate and forward the call to NRP.
+
+ Called by end-users to approve or reject a PE connection.
+ This method must validate and forward the call to NRP.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
- :param private_endpoint_connection_name: The name of the private endpoint connection associated
- with the workspace. Required.
+ :param private_endpoint_connection_name: NRP Private Endpoint Connection Name. Required.
:type private_endpoint_connection_name: str
- :param properties: The private endpoint connection properties. Required.
- :type properties: ~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection
+ :param body: PrivateEndpointConnection object. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
@@ -260,23 +332,26 @@ async def create_or_update(
resource_group_name: str,
workspace_name: str,
private_endpoint_connection_name: str,
- properties: IO,
+ body: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.PrivateEndpointConnection:
- """Update the state of specified private endpoint connection associated with the workspace.
+ """Called by end-users to approve or reject a PE connection.
+ This method must validate and forward the call to NRP.
+
+ Called by end-users to approve or reject a PE connection.
+ This method must validate and forward the call to NRP.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
- :param private_endpoint_connection_name: The name of the private endpoint connection associated
- with the workspace. Required.
+ :param private_endpoint_connection_name: NRP Private Endpoint Connection Name. Required.
:type private_endpoint_connection_name: str
- :param properties: The private endpoint connection properties. Required.
- :type properties: IO
+ :param body: PrivateEndpointConnection object. Required.
+ :type body: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
@@ -292,22 +367,25 @@ async def create_or_update(
resource_group_name: str,
workspace_name: str,
private_endpoint_connection_name: str,
- properties: Union[_models.PrivateEndpointConnection, IO],
+ body: Union[_models.PrivateEndpointConnection, IO],
**kwargs: Any
) -> _models.PrivateEndpointConnection:
- """Update the state of specified private endpoint connection associated with the workspace.
+ """Called by end-users to approve or reject a PE connection.
+ This method must validate and forward the call to NRP.
+
+ Called by end-users to approve or reject a PE connection.
+ This method must validate and forward the call to NRP.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
- :param private_endpoint_connection_name: The name of the private endpoint connection associated
- with the workspace. Required.
+ :param private_endpoint_connection_name: NRP Private Endpoint Connection Name. Required.
:type private_endpoint_connection_name: str
- :param properties: The private endpoint connection properties. Is either a
- PrivateEndpointConnection type or a IO type. Required.
- :type properties: ~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection or IO
+ :param body: PrivateEndpointConnection object. Is either a PrivateEndpointConnection type or an
+ IO type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
@@ -334,10 +412,10 @@ async def create_or_update(
content_type = content_type or "application/json"
_json = None
_content = None
- if isinstance(properties, (IOBase, bytes)):
- _content = properties
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
else:
- _json = self._serialize.body(properties, "PrivateEndpointConnection")
+ _json = self._serialize.body(body, "PrivateEndpointConnection")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
@@ -377,68 +455,3 @@ async def create_or_update(
create_or_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
-
- @distributed_trace_async
- async def delete( # pylint: disable=inconsistent-return-statements
- self, resource_group_name: str, workspace_name: str, private_endpoint_connection_name: str, **kwargs: Any
- ) -> None:
- """Deletes the specified private endpoint connection associated with the workspace.
-
- :param resource_group_name: The name of the resource group. The name is case insensitive.
- Required.
- :type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
- :type workspace_name: str
- :param private_endpoint_connection_name: The name of the private endpoint connection associated
- with the workspace. Required.
- :type private_endpoint_connection_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: None or the result of cls(response)
- :rtype: None
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- error_map = {
- 401: ClientAuthenticationError,
- 404: ResourceNotFoundError,
- 409: ResourceExistsError,
- 304: ResourceNotModifiedError,
- }
- error_map.update(kwargs.pop("error_map", {}) or {})
-
- _headers = kwargs.pop("headers", {}) or {}
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
-
- request = build_delete_request(
- resource_group_name=resource_group_name,
- workspace_name=workspace_name,
- private_endpoint_connection_name=private_endpoint_connection_name,
- subscription_id=self._config.subscription_id,
- api_version=api_version,
- template_url=self.delete.metadata["url"],
- headers=_headers,
- params=_params,
- )
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
-
- _stream = False
- pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
- )
-
- response = pipeline_response.http_response
-
- if response.status_code not in [200, 204]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
- raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
-
- if cls:
- return cls(pipeline_response, None, {})
-
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}"
- }
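
For callers, the visible change in this file is the payload parameter rename from `properties` to `body`. A minimal usage sketch follows, assuming an aio `MachineLearningServicesMgmtClient`, placeholder resource names, and `PrivateLinkServiceConnectionState` fields per the standard ARM private-endpoint pattern (verify against the shipped models):

```python
# Hedged sketch: approve a workspace private endpoint connection via the aio client.
# Subscription id, resource names, and the "Approved" status value are placeholders.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices import models as ml_models
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def approve_connection() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            connection = ml_models.PrivateEndpointConnection(
                private_link_service_connection_state=ml_models.PrivateLinkServiceConnectionState(
                    status="Approved",
                    description="Approved via SDK",
                )
            )
            result = await client.private_endpoint_connections.create_or_update(
                resource_group_name="my-rg",
                workspace_name="my-workspace",
                private_endpoint_connection_name="my-pe-connection",
                body=connection,  # this keyword was `properties` in the previous generation
            )
            print(result.name)


asyncio.run(approve_connection())
```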
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_private_link_resources_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_private_link_resources_operations.py
index 3e0bfcb8b50c..43f983dd2b56 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_private_link_resources_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_private_link_resources_operations.py
@@ -6,8 +6,10 @@
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
-from typing import Any, Callable, Dict, Optional, TypeVar
+from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
+import urllib.parse
+from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
@@ -19,7 +21,7 @@
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
-from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
@@ -50,22 +52,43 @@ def __init__(self, *args, **kwargs) -> None:
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
- @distributed_trace_async
- async def list(
+ @distributed_trace
+ def list(
self, resource_group_name: str, workspace_name: str, **kwargs: Any
- ) -> _models.PrivateLinkResourceListResult:
- """Gets the private link resources that need to be created for a workspace.
+ ) -> AsyncIterable["_models.PrivateLinkResource"]:
+ """Called by Client (Portal, CLI, etc) to get available "private link resources" for the
+ workspace.
+ Each "private link resource" is a connection endpoint (IP address) to the resource.
+ There is a single connection endpoint per workspace: the Data Plane IP address, returned by
+ DNS resolution.
+ Other RPs, such as Azure Storage, have multiple - one for Blobs, other for Queues, etc.
+ Defined in the "[NRP] Private Endpoint Design" doc, topic "GET API for GroupIds".
+
+ Called by Client (Portal, CLI, etc) to get available "private link resources" for the
+ workspace.
+ Each "private link resource" is a connection endpoint (IP address) to the resource.
+ There is a single connection endpoint per workspace: the Data Plane IP address, returned by
+ DNS resolution.
+ Other RPs, such as Azure Storage, have multiple - one for Blobs, other for Queues, etc.
+ Defined in the "[NRP] Private Endpoint Design" doc, topic "GET API for GroupIds".
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
- :return: PrivateLinkResourceListResult or the result of cls(response)
- :rtype: ~azure.mgmt.machinelearningservices.models.PrivateLinkResourceListResult
+ :return: An iterator like instance of either PrivateLinkResource or the result of cls(response)
+ :rtype:
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.PrivateLinkResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.PrivateLinkResourceListResult] = kwargs.pop("cls", None)
+
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
@@ -74,42 +97,63 @@ async def list(
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = kwargs.pop("headers", {}) or {}
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[_models.PrivateLinkResourceListResult] = kwargs.pop("cls", None)
-
- request = build_list_request(
- resource_group_name=resource_group_name,
- workspace_name=workspace_name,
- subscription_id=self._config.subscription_id,
- api_version=api_version,
- template_url=self.list.metadata["url"],
- headers=_headers,
- params=_params,
- )
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
-
- _stream = False
- pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
- )
-
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
- raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
-
- deserialized = self._deserialize("PrivateLinkResourceListResult", pipeline_response)
-
- if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.list.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize("PrivateLinkResourceListResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
list.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/privateLinkResources"
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_component_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_component_versions_operations.py
index 07bb1a688545..432f1b8555a7 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_component_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_component_versions_operations.py
@@ -70,6 +70,7 @@ def list(
order_by: Optional[str] = None,
top: Optional[int] = None,
skip: Optional[str] = None,
+ stage: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.ComponentVersion"]:
"""List versions.
@@ -90,6 +91,8 @@ def list(
:type top: int
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
+ :param stage: Component stage. Default value is None.
+ :type stage: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ComponentVersion or the result of cls(response)
:rtype:
@@ -121,6 +124,7 @@ def prepare_request(next_link=None):
order_by=order_by,
top=top,
skip=skip,
+ stage=stage,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
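
The new optional `stage` keyword is threaded straight through to the list request. A sketch, again assuming the `client` from the first snippet; `component_name` and the "Production" stage value are illustrative:

```python
# Hedged sketch: filter registry component versions by stage.
versions = client.registry_component_versions.list(
    resource_group_name="my-rg",
    registry_name="my-registry",
    component_name="my-component",
    stage="Production",
)
async for component_version in versions:
    print(component_version.name)
```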
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_environment_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_environment_versions_operations.py
index 10560e9f5fdb..9529caa41ef1 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_environment_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_environment_versions_operations.py
@@ -71,6 +71,7 @@ def list(
top: Optional[int] = None,
skip: Optional[str] = None,
list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ stage: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.EnvironmentVersion"]:
"""List versions.
@@ -94,6 +95,9 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+ :param stage: Stage for including/excluding (for example) archived entities. Takes priority
+ over listViewType. Default value is None.
+ :type stage: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EnvironmentVersion or the result of cls(response)
:rtype:
@@ -126,6 +130,7 @@ def prepare_request(next_link=None):
top=top,
skip=skip,
list_view_type=list_view_type,
+ stage=stage,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
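
Per the added docstring, `stage` takes priority over `list_view_type` when both are supplied. A sketch of that combination, with `environment_name` and the stage value as illustrative placeholders:

```python
# Hedged sketch: `stage` wins over `list_view_type` when both are given (per the docstring).
versions = client.registry_environment_versions.list(
    resource_group_name="my-rg",
    registry_name="my-registry",
    environment_name="my-environment",
    list_view_type="ActiveOnly",
    stage="Archived",
)
async for environment_version in versions:
    print(environment_version.name)
```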
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_model_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_model_versions_operations.py
index 512c6ba1df76..30b748212e41 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_model_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_model_versions_operations.py
@@ -37,6 +37,7 @@
build_delete_request,
build_get_request,
build_list_request,
+ build_package_request,
)
T = TypeVar("T")
@@ -682,6 +683,273 @@ def get_long_running_output(pipeline_response):
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions/{version}"
}
+ async def _package_initial(
+ self,
+ resource_group_name: str,
+ registry_name: str,
+ model_name: str,
+ version: str,
+ body: Union[_models.PackageRequest, IO],
+ **kwargs: Any
+ ) -> Optional[_models.PackageResponse]:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Optional[_models.PackageResponse]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "PackageRequest")
+
+ request = build_package_request(
+ resource_group_name=resource_group_name,
+ registry_name=registry_name,
+ model_name=model_name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._package_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("PackageResponse", pipeline_response)
+
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+
+ _package_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions/{version}/package"
+ }
+
+ @overload
+ async def begin_package(
+ self,
+ resource_group_name: str,
+ registry_name: str,
+ model_name: str,
+ version: str,
+ body: _models.PackageRequest,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.PackageResponse]:
+ """Model Version Package operation.
+
+ Model Version Package operation.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param registry_name: Name of Azure Machine Learning registry. This is case-insensitive.
+ Required.
+ :type registry_name: str
+ :param model_name: Container name. This is case-sensitive. Required.
+ :type model_name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Package operation request body. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.PackageRequest
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either PackageResponse or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.PackageResponse]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_package(
+ self,
+ resource_group_name: str,
+ registry_name: str,
+ model_name: str,
+ version: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.PackageResponse]:
+ """Model Version Package operation.
+
+ Model Version Package operation.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param registry_name: Name of Azure Machine Learning registry. This is case-insensitive.
+ Required.
+ :type registry_name: str
+ :param model_name: Container name. This is case-sensitive. Required.
+ :type model_name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Package operation request body. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either PackageResponse or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.PackageResponse]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_package(
+ self,
+ resource_group_name: str,
+ registry_name: str,
+ model_name: str,
+ version: str,
+ body: Union[_models.PackageRequest, IO],
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.PackageResponse]:
+ """Model Version Package operation.
+
+ Model Version Package operation.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param registry_name: Name of Azure Machine Learning registry. This is case-insensitive.
+ Required.
+ :type registry_name: str
+ :param model_name: Container name. This is case-sensitive. Required.
+ :type model_name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Package operation request body. Is either a PackageRequest type or a IO type.
+ Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.PackageRequest or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either PackageResponse or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.PackageResponse]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.PackageResponse] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._package_initial(
+ resource_group_name=resource_group_name,
+ registry_name=registry_name,
+ model_name=model_name,
+ version=version,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("PackageResponse", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_package.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions/{version}/package"
+ }
+
@overload
async def create_or_get_start_pending_upload(
self,
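
The new `begin_package` operation follows the usual `AsyncLROPoller` pattern. A sketch, assuming the `registry_model_versions` attribute name and a `PackageRequest` (or raw JSON bytes) prepared elsewhere, since its fields are defined by the `PackageRequest` model and not spelled out here:

```python
# Hedged sketch: start the model-version package LRO and wait for the PackageResponse.
# `package_request` is a models.PackageRequest (or IO/bytes payload) built elsewhere.
poller = await client.registry_model_versions.begin_package(
    resource_group_name="my-rg",
    registry_name="my-registry",
    model_name="my-model",
    version="1",
    body=package_request,
)
package_response = await poller.result()
print(package_response)
```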
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_serverless_endpoints_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_serverless_endpoints_operations.py
new file mode 100644
index 000000000000..4571145b33c5
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_serverless_endpoints_operations.py
@@ -0,0 +1,1270 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import AsyncHttpResponse
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.core.rest import HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
+
+from ... import models as _models
+from ..._vendor import _convert_request
+from ...operations._serverless_endpoints_operations import (
+ build_create_or_update_request,
+ build_delete_request,
+ build_get_request,
+ build_get_status_request,
+ build_list_keys_request,
+ build_list_request,
+ build_regenerate_keys_request,
+ build_update_request,
+)
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class ServerlessEndpointsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.aio.MachineLearningServicesMgmtClient`'s
+ :attr:`serverless_endpoints` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self, resource_group_name: str, workspace_name: str, skip: Optional[str] = None, **kwargs: Any
+ ) -> AsyncIterable["_models.ServerlessEndpoint"]:
+ """List Serverless Endpoints.
+
+ List Serverless Endpoints.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either ServerlessEndpoint or the result of cls(response)
+ :rtype:
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.ServerlessEndpointTrackedResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ api_version=api_version,
+ template_url=self.list.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize("ServerlessEndpointTrackedResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
+
+ list.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints"
+ }
+
+ async def _delete_initial( # pylint: disable=inconsistent-return-statements
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> None:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self._delete_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202, 204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ _delete_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}"
+ }
+
+ @distributed_trace_async
+ async def begin_delete(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Delete Serverless Endpoint (asynchronous).
+
+ Delete Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._delete_initial( # type: ignore
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_delete.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}"
+ }
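
Deleting a serverless endpoint is also a long-running operation; a sketch using the `serverless_endpoints` attribute named in the class docstring above and the `client` from the first snippet:

```python
# Hedged sketch: delete a serverless endpoint and block until the LRO completes.
poller = await client.serverless_endpoints.begin_delete(
    resource_group_name="my-rg",
    workspace_name="my-workspace",
    name="my-endpoint",
)
await poller.result()  # returns None on success
```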
+
+ @distributed_trace_async
+ async def get(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> _models.ServerlessEndpoint:
+ """Get Serverless Endpoint.
+
+ Get Serverless Endpoint.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: ServerlessEndpoint or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.ServerlessEndpoint
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.ServerlessEndpoint] = kwargs.pop("cls", None)
+
+ request = build_get_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.get.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("ServerlessEndpoint", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ get.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}"
+ }
+
+ async def _update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.PartialMinimalTrackedResourceWithSkuAndIdentity, IO],
+ **kwargs: Any
+ ) -> Optional[_models.ServerlessEndpoint]:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Optional[_models.ServerlessEndpoint]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "PartialMinimalTrackedResourceWithSkuAndIdentity")
+
+ request = build_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._update_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("ServerlessEndpoint", pipeline_response)
+
+ if response.status_code == 202:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+
+ _update_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}"
+ }
+
+ @overload
+ async def begin_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: _models.PartialMinimalTrackedResourceWithSkuAndIdentity,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ServerlessEndpoint]:
+ """Update Serverless Endpoint (asynchronous).
+
+ Update Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: Serverless Endpoint entity to apply during operation. Required.
+ :type body:
+ ~azure.mgmt.machinelearningservices.models.PartialMinimalTrackedResourceWithSkuAndIdentity
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either ServerlessEndpoint or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ServerlessEndpoint]:
+ """Update Serverless Endpoint (asynchronous).
+
+ Update Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: Serverless Endpoint entity to apply during operation. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either ServerlessEndpoint or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.PartialMinimalTrackedResourceWithSkuAndIdentity, IO],
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ServerlessEndpoint]:
+ """Update Serverless Endpoint (asynchronous).
+
+ Update Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: Serverless Endpoint entity to apply during operation. Is either a
+ PartialMinimalTrackedResourceWithSkuAndIdentity type or a IO type. Required.
+ :type body:
+ ~azure.mgmt.machinelearningservices.models.PartialMinimalTrackedResourceWithSkuAndIdentity or
+ IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either ServerlessEndpoint or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.ServerlessEndpoint] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("ServerlessEndpoint", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_update.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}"
+ }
+
+ async def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.ServerlessEndpoint, IO],
+ **kwargs: Any
+ ) -> _models.ServerlessEndpoint:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.ServerlessEndpoint] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "ServerlessEndpoint")
+
+ request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._create_or_update_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("ServerlessEndpoint", pipeline_response)
+
+ if response.status_code == 201:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Azure-AsyncOperation"] = self._deserialize(
+ "str", response.headers.get("Azure-AsyncOperation")
+ )
+
+ deserialized = self._deserialize("ServerlessEndpoint", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ _create_or_update_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}"
+ }
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: _models.ServerlessEndpoint,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ServerlessEndpoint]:
+ """Create or update Serverless Endpoint (asynchronous).
+
+ Create or update Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: Serverless Endpoint entity to apply during operation. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ServerlessEndpoint
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either ServerlessEndpoint or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ServerlessEndpoint]:
+ """Create or update Serverless Endpoint (asynchronous).
+
+ Create or update Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: Serverless Endpoint entity to apply during operation. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either ServerlessEndpoint or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.ServerlessEndpoint, IO],
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ServerlessEndpoint]:
+ """Create or update Serverless Endpoint (asynchronous).
+
+ Create or update Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: Serverless Endpoint entity to apply during operation. Is either a
+ ServerlessEndpoint type or an IO type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ServerlessEndpoint or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either ServerlessEndpoint or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.ServerlessEndpoint] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("ServerlessEndpoint", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod,
+ AsyncARMPolling(lro_delay, lro_options={"final-state-via": "original-uri"}, **kwargs),
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_create_or_update.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}"
+ }
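A minimal usage sketch of the new serverless-endpoint create LRO. The aio module path matches the files in this diff, but the serverless_endpoints operation-group attribute, the placeholder resource names, and the JSON payload shape are assumptions; raw JSON bytes are passed so the request goes through the IO/bytes branch of the initial call.

import asyncio
import json

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # Raw JSON bytes take the IO branch of the initial request; the payload
            # shape below is a placeholder, not the documented ServerlessEndpoint schema.
            payload = json.dumps({"location": "eastus", "properties": {}}).encode("utf-8")
            poller = await client.serverless_endpoints.begin_create_or_update(
                resource_group_name="<resource-group>",
                workspace_name="<workspace>",
                name="<endpoint-name>",
                body=payload,
            )
            endpoint = await poller.result()  # resolves to a ServerlessEndpoint model
            print(endpoint.name)


asyncio.run(main())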
+
+ @distributed_trace_async
+ async def list_keys(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> _models.EndpointAuthKeys:
+ """List EndpointAuthKeys for an Endpoint using Key-based authentication.
+
+ List EndpointAuthKeys for an Endpoint using Key-based authentication.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: EndpointAuthKeys or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.EndpointAuthKeys
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.EndpointAuthKeys] = kwargs.pop("cls", None)
+
+ request = build_list_keys_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.list_keys.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("EndpointAuthKeys", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ list_keys.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}/listKeys"
+ }
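Assuming the same serverless_endpoints attribute name, listing the key credentials is a single awaited call; as_dict() is used because EndpointAuthKeys is an msrest model whose field names are not shown in this diff.

from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def show_endpoint_keys(client: MachineLearningServicesMgmtClient) -> None:
    # "serverless_endpoints" as the operation-group attribute is an assumption.
    keys = await client.serverless_endpoints.list_keys(
        resource_group_name="<resource-group>",
        workspace_name="<workspace>",
        name="<endpoint-name>",
    )
    # EndpointAuthKeys is an msrest model, so as_dict() avoids guessing field names.
    print(keys.as_dict())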
+
+ async def _regenerate_keys_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.RegenerateEndpointKeysRequest, IO],
+ **kwargs: Any
+ ) -> Optional[_models.EndpointAuthKeys]:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Optional[_models.EndpointAuthKeys]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "RegenerateEndpointKeysRequest")
+
+ request = build_regenerate_keys_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._regenerate_keys_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("EndpointAuthKeys", pipeline_response)
+
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+
+ _regenerate_keys_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}/regenerateKeys"
+ }
+
+ @overload
+ async def begin_regenerate_keys(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: _models.RegenerateEndpointKeysRequest,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.EndpointAuthKeys]:
+ """Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous).
+
+ Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: RegenerateKeys request. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.RegenerateEndpointKeysRequest
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either EndpointAuthKeys or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.EndpointAuthKeys]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_regenerate_keys(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.EndpointAuthKeys]:
+ """Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous).
+
+ Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: RegenerateKeys request. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either EndpointAuthKeys or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.EndpointAuthKeys]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_regenerate_keys(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.RegenerateEndpointKeysRequest, IO],
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.EndpointAuthKeys]:
+ """Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous).
+
+ Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: RegenerateKeys request. Is either a RegenerateEndpointKeysRequest type or an IO
+ type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.RegenerateEndpointKeysRequest or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either EndpointAuthKeys or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.EndpointAuthKeys]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.EndpointAuthKeys] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._regenerate_keys_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("EndpointAuthKeys", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_regenerate_keys.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}/regenerateKeys"
+ }
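A sketch of key rotation through the new LRO. The {"keyType": "Primary"} payload shape and the operation-group attribute name are assumptions; raw bytes again take the IO branch of the initial request.

import json

from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def rotate_endpoint_key(client: MachineLearningServicesMgmtClient) -> None:
    # The payload shape is assumed rather than read from RegenerateEndpointKeysRequest.
    poller = await client.serverless_endpoints.begin_regenerate_keys(
        resource_group_name="<resource-group>",
        workspace_name="<workspace>",
        name="<endpoint-name>",
        body=json.dumps({"keyType": "Primary"}).encode("utf-8"),
    )
    refreshed = await poller.result()  # resolves to EndpointAuthKeys
    print(refreshed.as_dict())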
+
+ @distributed_trace_async
+ async def get_status(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> _models.ServerlessEndpointStatus:
+ """Status of the model backing the Serverless Endpoint.
+
+ Status of the model backing the Serverless Endpoint.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: ServerlessEndpointStatus or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.ServerlessEndpointStatus
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.ServerlessEndpointStatus] = kwargs.pop("cls", None)
+
+ request = build_get_status_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.get_status.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("ServerlessEndpointStatus", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ get_status.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}/getStatus"
+ }
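Reading the backing-model status follows the same pattern as list_keys; this sketch assumes the same operation-group attribute name and placeholder resource names.

from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def show_endpoint_status(client: MachineLearningServicesMgmtClient) -> None:
    # "serverless_endpoints" as the operation-group attribute is an assumption.
    status = await client.serverless_endpoints.get_status(
        resource_group_name="<resource-group>",
        workspace_name="<workspace>",
        name="<endpoint-name>",
    )
    print(status.as_dict())  # ServerlessEndpointStatus is an msrest model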
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_workspace_connections_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_workspace_connections_operations.py
index 1c796d31405b..6f0ac48776a6 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_workspace_connections_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_workspace_connections_operations.py
@@ -7,7 +7,7 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
+from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -21,11 +21,13 @@
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
@@ -34,6 +36,9 @@
build_delete_request,
build_get_request,
build_list_request,
+ build_list_secrets_request,
+ build_test_connection_request,
+ build_update_request,
)
T = TypeVar("T")
@@ -59,28 +64,447 @@ def __init__(self, *args, **kwargs) -> None:
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+ @distributed_trace
+ def list(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ target: Optional[str] = None,
+ category: Optional[str] = None,
+ **kwargs: Any
+ ) -> AsyncIterable["_models.WorkspaceConnectionPropertiesV2BasicResource"]:
+ """Lists all the available machine learning workspace connections under the specified workspace.
+
+ Lists all the available machine learning workspace connections under the specified workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param target: Target of the workspace connection. Default value is None.
+ :type target: str
+ :param category: Category of the workspace connection. Default value is None.
+ :type category: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either WorkspaceConnectionPropertiesV2BasicResource or
+ the result of cls(response)
+ :rtype:
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.WorkspaceConnectionPropertiesV2BasicResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ target=target,
+ category=category,
+ api_version=api_version,
+ template_url=self.list.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize(
+ "WorkspaceConnectionPropertiesV2BasicResourceArmPaginatedResult", pipeline_response
+ )
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
+
+ list.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections"
+ }
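A sketch of consuming the restored list operation: it returns an AsyncItemPaged without awaiting, and the iterator follows nextLink internally. The workspace_connections attribute name and the category filter value are assumptions.

from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def dump_connections(client: MachineLearningServicesMgmtClient) -> None:
    # list() returns the pager immediately (no await); paging happens inside the iterator.
    pager = client.workspace_connections.list(
        resource_group_name="<resource-group>",
        workspace_name="<workspace>",
        category="AzureOpenAI",  # optional filter; the value here is illustrative
    )
    async for connection in pager:
        print(connection.name)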
+
+ @distributed_trace_async
+ async def delete( # pylint: disable=inconsistent-return-statements
+ self, resource_group_name: str, workspace_name: str, connection_name: str, **kwargs: Any
+ ) -> None:
+ """Delete a machine learning workspace connection by name.
+
+ Delete a machine learning workspace connection by name.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param connection_name: Friendly name of the workspace connection. Required.
+ :type connection_name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ connection_name=connection_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.delete.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ delete.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}"
+ }
+
+ @distributed_trace_async
+ async def get(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ connection_name: str,
+ aoai_models_to_deploy: Optional[str] = None,
+ **kwargs: Any
+ ) -> _models.WorkspaceConnectionPropertiesV2BasicResource:
+ """Gets a machine learning workspace connection by name.
+
+ Gets a machine learning workspace connection by name.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param connection_name: Friendly name of the workspace connection. Required.
+ :type connection_name: str
+ :param aoai_models_to_deploy: Query parameter specifying which AOAI model should be deployed.
+ Default value is None.
+ :type aoai_models_to_deploy: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: WorkspaceConnectionPropertiesV2BasicResource or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.WorkspaceConnectionPropertiesV2BasicResource] = kwargs.pop("cls", None)
+
+ request = build_get_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ connection_name=connection_name,
+ subscription_id=self._config.subscription_id,
+ aoai_models_to_deploy=aoai_models_to_deploy,
+ api_version=api_version,
+ template_url=self.get.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("WorkspaceConnectionPropertiesV2BasicResource", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ get.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}"
+ }
+
+ @overload
+ async def update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ connection_name: str,
+ body: Optional[_models.WorkspaceConnectionUpdateParameter] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.WorkspaceConnectionPropertiesV2BasicResource:
+ """Update a machine learning workspace connection under the specified workspace.
+
+ Update a machine learning workspace connection under the specified workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param connection_name: Friendly name of the workspace connection. Required.
+ :type connection_name: str
+ :param body: Parameters for workspace connection update. Default value is None.
+ :type body: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionUpdateParameter
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: WorkspaceConnectionPropertiesV2BasicResource or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ connection_name: str,
+ body: Optional[IO] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.WorkspaceConnectionPropertiesV2BasicResource:
+ """Update a machine learning workspace connection under the specified workspace.
+
+ Update a machine learning workspace connection under the specified workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param connection_name: Friendly name of the workspace connection. Required.
+ :type connection_name: str
+ :param body: Parameters for workspace connection update. Default value is None.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: WorkspaceConnectionPropertiesV2BasicResource or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ connection_name: str,
+ body: Optional[Union[_models.WorkspaceConnectionUpdateParameter, IO]] = None,
+ **kwargs: Any
+ ) -> _models.WorkspaceConnectionPropertiesV2BasicResource:
+ """Update a machine learning workspace connection under the specified workspace.
+
+ Update a machine learning workspace connection under the specified workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param connection_name: Friendly name of the workspace connection. Required.
+ :type connection_name: str
+ :param body: Parameters for workspace connection update. Is either a
+ WorkspaceConnectionUpdateParameter type or an IO type. Default value is None.
+ :type body: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionUpdateParameter or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: WorkspaceConnectionPropertiesV2BasicResource or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.WorkspaceConnectionPropertiesV2BasicResource] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ if body is not None:
+ _json = self._serialize.body(body, "WorkspaceConnectionUpdateParameter")
+ else:
+ _json = None
+
+ request = build_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ connection_name=connection_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self.update.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("WorkspaceConnectionPropertiesV2BasicResource", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ update.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}"
+ }
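A sketch of the new update operation. Raw JSON bytes exercise the IO branch; the payload shape is a placeholder, not the documented WorkspaceConnectionUpdateParameter schema, and the operation-group attribute name is assumed.

import json

from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def patch_connection(client: MachineLearningServicesMgmtClient) -> None:
    # The properties payload below is illustrative only.
    updated = await client.workspace_connections.update(
        resource_group_name="<resource-group>",
        workspace_name="<workspace>",
        connection_name="<connection-name>",
        body=json.dumps({"properties": {}}).encode("utf-8"),
    )
    print(updated.as_dict())  # WorkspaceConnectionPropertiesV2BasicResource is an msrest model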
+
@overload
async def create(
self,
resource_group_name: str,
workspace_name: str,
connection_name: str,
- parameters: _models.WorkspaceConnectionPropertiesV2BasicResource,
+ body: Optional[_models.WorkspaceConnectionPropertiesV2BasicResource] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.WorkspaceConnectionPropertiesV2BasicResource:
- """create.
+ """Create or update a machine learning workspace connection under the specified workspace.
+
+ Create or update a machine learning workspace connection under the specified workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
:param connection_name: Friendly name of the workspace connection. Required.
:type connection_name: str
- :param parameters: The object for creating or updating a new workspace connection. Required.
- :type parameters:
+ :param body: The object for creating or updating a new workspace connection. Default value is
+ None.
+ :type body:
~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
@@ -97,22 +521,25 @@ async def create(
resource_group_name: str,
workspace_name: str,
connection_name: str,
- parameters: IO,
+ body: Optional[IO] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.WorkspaceConnectionPropertiesV2BasicResource:
- """create.
+ """Create or update a machine learning workspace connection under the specified workspace.
+
+ Create or update a machine learning workspace connection under the specified workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
:param connection_name: Friendly name of the workspace connection. Required.
:type connection_name: str
- :param parameters: The object for creating or updating a new workspace connection. Required.
- :type parameters: IO
+ :param body: The object for creating or updating a new workspace connection. Default value is
+ None.
+ :type body: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
@@ -128,21 +555,23 @@ async def create(
resource_group_name: str,
workspace_name: str,
connection_name: str,
- parameters: Union[_models.WorkspaceConnectionPropertiesV2BasicResource, IO],
+ body: Optional[Union[_models.WorkspaceConnectionPropertiesV2BasicResource, IO]] = None,
**kwargs: Any
) -> _models.WorkspaceConnectionPropertiesV2BasicResource:
- """create.
+ """Create or update a machine learning workspace connection under the specified workspace.
+
+ Create or update a machine learning workspace connection under the specified workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
:param connection_name: Friendly name of the workspace connection. Required.
:type connection_name: str
- :param parameters: The object for creating or updating a new workspace connection. Is either a
- WorkspaceConnectionPropertiesV2BasicResource type or a IO type. Required.
- :type parameters:
+ :param body: The object for creating or updating a new workspace connection. Is either a
+ WorkspaceConnectionPropertiesV2BasicResource type or an IO type. Default value is None.
+ :type body:
~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
@@ -170,10 +599,13 @@ async def create(
content_type = content_type or "application/json"
_json = None
_content = None
- if isinstance(parameters, (IOBase, bytes)):
- _content = parameters
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
else:
- _json = self._serialize.body(parameters, "WorkspaceConnectionPropertiesV2BasicResource")
+ if body is not None:
+ _json = self._serialize.body(body, "WorkspaceConnectionPropertiesV2BasicResource")
+ else:
+ _json = None
request = build_create_request(
resource_group_name=resource_group_name,
@@ -215,18 +647,28 @@ async def create(
}
@distributed_trace_async
- async def get(
- self, resource_group_name: str, workspace_name: str, connection_name: str, **kwargs: Any
+ async def list_secrets(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ connection_name: str,
+ aoai_models_to_deploy: Optional[str] = None,
+ **kwargs: Any
) -> _models.WorkspaceConnectionPropertiesV2BasicResource:
- """get.
+ """List all the secrets of a machine learning workspace connection.
+
+ List all the secrets of a machine learning workspace connection.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
:param connection_name: Friendly name of the workspace connection. Required.
:type connection_name: str
+ :param aoai_models_to_deploy: Query parameter specifying which AOAI model should be deployed.
+ Default value is None.
+ :type aoai_models_to_deploy: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WorkspaceConnectionPropertiesV2BasicResource or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
@@ -246,13 +688,14 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.WorkspaceConnectionPropertiesV2BasicResource] = kwargs.pop("cls", None)
- request = build_get_request(
+ request = build_list_secrets_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
subscription_id=self._config.subscription_id,
+ aoai_models_to_deploy=aoai_models_to_deploy,
api_version=api_version,
- template_url=self.get.metadata["url"],
+ template_url=self.list_secrets.metadata["url"],
headers=_headers,
params=_params,
)
@@ -278,28 +721,18 @@ async def get(
return deserialized
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}"
+ list_secrets.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}/listsecrets"
}
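A sketch of the renamed list_secrets operation, which, unlike get, is expected to return the connection with its credential fields populated. The workspace_connections attribute name and the placeholder resource names are assumptions.

from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def fetch_connection_secrets(client: MachineLearningServicesMgmtClient) -> None:
    # "workspace_connections" as the operation-group attribute is an assumption.
    connection = await client.workspace_connections.list_secrets(
        resource_group_name="<resource-group>",
        workspace_name="<workspace>",
        connection_name="<connection-name>",
    )
    print(connection.as_dict())  # msrest model; credential fields appear here when present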
- @distributed_trace_async
- async def delete( # pylint: disable=inconsistent-return-statements
- self, resource_group_name: str, workspace_name: str, connection_name: str, **kwargs: Any
+ async def _test_connection_initial( # pylint: disable=inconsistent-return-statements
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ connection_name: str,
+ body: Optional[Union[_models.WorkspaceConnectionPropertiesV2BasicResource, IO]] = None,
+ **kwargs: Any
) -> None:
- """delete.
-
- :param resource_group_name: The name of the resource group. The name is case insensitive.
- Required.
- :type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
- :type workspace_name: str
- :param connection_name: Friendly name of the workspace connection. Required.
- :type connection_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: None or the result of cls(response)
- :rtype: None
- :raises ~azure.core.exceptions.HttpResponseError:
- """
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
@@ -308,19 +741,34 @@ async def delete( # pylint: disable=inconsistent-return-statements
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = kwargs.pop("headers", {}) or {}
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ if body is not None:
+ _json = self._serialize.body(body, "WorkspaceConnectionPropertiesV2BasicResource")
+ else:
+ _json = None
+
+ request = build_test_connection_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._test_connection_initial.metadata["url"],
headers=_headers,
params=_params,
)
@@ -334,121 +782,185 @@ async def delete( # pylint: disable=inconsistent-return-statements
response = pipeline_response.http_response
- if response.status_code not in [200, 204]:
+ if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+ response_headers = {}
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, response_headers)
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}"
+ _test_connection_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}/testconnection"
}
- @distributed_trace
- def list(
+ @overload
+ async def begin_test_connection(
self,
resource_group_name: str,
workspace_name: str,
- target: Optional[str] = None,
- category: Optional[str] = None,
+ connection_name: str,
+ body: Optional[_models.WorkspaceConnectionPropertiesV2BasicResource] = None,
+ *,
+ content_type: str = "application/json",
**kwargs: Any
- ) -> AsyncIterable["_models.WorkspaceConnectionPropertiesV2BasicResource"]:
- """list.
+ ) -> AsyncLROPoller[None]:
+ """Test a machine learning workspace connection under the specified workspace.
+
+ Test a machine learning workspace connection under the specified workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
- :param target: Target of the workspace connection. Default value is None.
- :type target: str
- :param category: Category of the workspace connection. Default value is None.
- :type category: str
+ :param connection_name: Friendly name of the workspace connection. Required.
+ :type connection_name: str
+ :param body: Workspace Connection object. Default value is None.
+ :type body:
+ ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
- :return: An iterator like instance of either WorkspaceConnectionPropertiesV2BasicResource or
- the result of cls(response)
- :rtype:
- ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource]
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
- _headers = kwargs.pop("headers", {}) or {}
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[_models.WorkspaceConnectionPropertiesV2BasicResourceArmPaginatedResult] = kwargs.pop("cls", None)
-
- error_map = {
- 401: ClientAuthenticationError,
- 404: ResourceNotFoundError,
- 409: ResourceExistsError,
- 304: ResourceNotModifiedError,
- }
- error_map.update(kwargs.pop("error_map", {}) or {})
+ @overload
+ async def begin_test_connection(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ connection_name: str,
+ body: Optional[IO] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Test a machine learning workspace connection under the specified workspace.
- def prepare_request(next_link=None):
- if not next_link:
+ Test a machine learning workspace connection under the specified workspace.
- request = build_list_request(
- resource_group_name=resource_group_name,
- workspace_name=workspace_name,
- subscription_id=self._config.subscription_id,
- target=target,
- category=category,
- api_version=api_version,
- template_url=self.list.metadata["url"],
- headers=_headers,
- params=_params,
- )
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param connection_name: Friendly name of the workspace connection. Required.
+ :type connection_name: str
+ :param body: Workspace Connection object. Default value is None.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
- else:
- # make call to next link with the client's api-version
- _parsed_next_link = urllib.parse.urlparse(next_link)
- _next_request_params = case_insensitive_dict(
- {
- key: [urllib.parse.quote(v) for v in value]
- for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
- }
- )
- _next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
- "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
- )
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ @distributed_trace_async
+ async def begin_test_connection(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ connection_name: str,
+ body: Optional[Union[_models.WorkspaceConnectionPropertiesV2BasicResource, IO]] = None,
+ **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Test a machine learning workspace connection under the specified workspace.
- async def extract_data(pipeline_response):
- deserialized = self._deserialize(
- "WorkspaceConnectionPropertiesV2BasicResourceArmPaginatedResult", pipeline_response
- )
- list_of_elem = deserialized.value
- if cls:
- list_of_elem = cls(list_of_elem) # type: ignore
- return deserialized.next_link or None, AsyncList(list_of_elem)
+ Test a machine learning workspace connection under the specified workspace.
- async def get_next(next_link=None):
- request = prepare_request(next_link)
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param connection_name: Friendly name of the workspace connection. Required.
+ :type connection_name: str
+ :param body: Workspace Connection object. Is either a
+ WorkspaceConnectionPropertiesV2BasicResource type or an IO type. Default value is None.
+ :type body:
+ ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- _stream = False
- pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._test_connection_initial( # type: ignore
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ connection_name=connection_name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
)
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
- raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+ kwargs.pop("error_map", None)
- return pipeline_response
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {})
- return AsyncItemPaged(get_next, extract_data)
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections"
+ begin_test_connection.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}/testconnection"
}
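# Hypothetical usage sketch for the async begin_test_connection LRO added above.
# Assumes the operation group is exposed as `client.workspace_connections`; the
# subscription and resource names are placeholders, and with no body the service
# tests the connection using its stored properties.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def test_connection() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            poller = await client.workspace_connections.begin_test_connection(
                resource_group_name="my-rg",
                workspace_name="my-workspace",
                connection_name="my-connection",
            )
            await poller.result()  # returns None; raises HttpResponseError if the test fails


asyncio.run(test_connection())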
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_workspaces_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_workspaces_operations.py
index 1195fbbbd75b..e83a9a137fac 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_workspaces_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_workspaces_operations.py
@@ -71,20 +71,30 @@ def __init__(self, *args, **kwargs) -> None:
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
- @distributed_trace_async
- async def get(self, resource_group_name: str, workspace_name: str, **kwargs: Any) -> _models.Workspace:
- """Gets the properties of the specified machine learning workspace.
+ @distributed_trace
+ def list_by_subscription(
+ self, kind: Optional[str] = None, skip: Optional[str] = None, **kwargs: Any
+ ) -> AsyncIterable["_models.Workspace"]:
+ """Lists all the available machine learning workspaces under the specified subscription.
- :param resource_group_name: The name of the resource group. The name is case insensitive.
- Required.
- :type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
- :type workspace_name: str
+ Lists all the available machine learning workspaces under the specified subscription.
+
+ :param kind: Kind of workspace. Default value is None.
+ :type kind: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
:keyword callable cls: A custom type or function that will be passed the direct response
- :return: Workspace or the result of cls(response)
- :rtype: ~azure.mgmt.machinelearningservices.models.Workspace
+ :return: An iterator like instance of either Workspace or the result of cls(response)
+ :rtype:
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.Workspace]
:raises ~azure.core.exceptions.HttpResponseError:
"""
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.WorkspaceListResult] = kwargs.pop("cls", None)
+
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
@@ -93,50 +103,169 @@ async def get(self, resource_group_name: str, workspace_name: str, **kwargs: Any
}
error_map.update(kwargs.pop("error_map", {}) or {})
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ request = build_list_by_subscription_request(
+ subscription_id=self._config.subscription_id,
+ kind=kind,
+ skip=skip,
+ api_version=api_version,
+ template_url=self.list_by_subscription.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize("WorkspaceListResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
+
+ list_by_subscription.metadata = {
+ "url": "/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/workspaces"
+ }
+
+ @distributed_trace
+ def list_by_resource_group(
+ self, resource_group_name: str, kind: Optional[str] = None, skip: Optional[str] = None, **kwargs: Any
+ ) -> AsyncIterable["_models.Workspace"]:
+ """Lists all the available machine learning workspaces under the specified resource group.
+
+ Lists all the available machine learning workspaces under the specified resource group.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param kind: Kind of workspace. Default value is None.
+ :type kind: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either Workspace or the result of cls(response)
+ :rtype:
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.Workspace]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[_models.Workspace] = kwargs.pop("cls", None)
+ cls: ClsType[_models.WorkspaceListResult] = kwargs.pop("cls", None)
- request = build_get_request(
- resource_group_name=resource_group_name,
- workspace_name=workspace_name,
- subscription_id=self._config.subscription_id,
- api_version=api_version,
- template_url=self.get.metadata["url"],
- headers=_headers,
- params=_params,
- )
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
- _stream = False
- pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
- )
+ def prepare_request(next_link=None):
+ if not next_link:
- response = pipeline_response.http_response
+ request = build_list_by_resource_group_request(
+ resource_group_name=resource_group_name,
+ subscription_id=self._config.subscription_id,
+ kind=kind,
+ skip=skip,
+ api_version=api_version,
+ template_url=self.list_by_resource_group.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
- raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
- deserialized = self._deserialize("Workspace", pipeline_response)
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize("WorkspaceListResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
- if cls:
- return cls(pipeline_response, deserialized, {})
+ async def get_next(next_link=None):
+ request = prepare_request(next_link)
- return deserialized
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
+
+ list_by_resource_group.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces"
}
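# Hypothetical usage sketch for the paged list_by_subscription / list_by_resource_group
# operations above: both now accept an optional `kind` filter and return an
# AsyncItemPaged of Workspace. Resource names and the kind value are illustrative.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def list_workspaces() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # All workspaces visible in the subscription.
            async for ws in client.workspaces.list_by_subscription():
                print(ws.name, ws.location)
            # Workspaces of a given kind within one resource group (kind is optional).
            async for ws in client.workspaces.list_by_resource_group("my-rg", kind="Hub"):
                print(ws.name)


asyncio.run(list_workspaces())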
- async def _create_or_update_initial(
- self, resource_group_name: str, workspace_name: str, parameters: Union[_models.Workspace, IO], **kwargs: Any
- ) -> Optional[_models.Workspace]:
+ async def _delete_initial( # pylint: disable=inconsistent-return-statements
+ self, resource_group_name: str, workspace_name: str, force_to_purge: bool = False, **kwargs: Any
+ ) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
@@ -145,30 +274,19 @@ async def _create_or_update_initial(
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[Optional[_models.Workspace]] = kwargs.pop("cls", None)
-
- content_type = content_type or "application/json"
- _json = None
- _content = None
- if isinstance(parameters, (IOBase, bytes)):
- _content = parameters
- else:
- _json = self._serialize.body(parameters, "Workspace")
+ cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_create_or_update_request(
+ request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
+ force_to_purge=force_to_purge,
api_version=api_version,
- content_type=content_type,
- json=_json,
- content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
+ template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
@@ -182,117 +300,38 @@ async def _create_or_update_initial(
response = pipeline_response.http_response
- if response.status_code not in [200, 202]:
+ if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = None
- if response.status_code == 200:
- deserialized = self._deserialize("Workspace", pipeline_response)
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, None, response_headers)
- return deserialized
-
- _create_or_update_initial.metadata = {
+ _delete_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
}
- @overload
- async def begin_create_or_update(
- self,
- resource_group_name: str,
- workspace_name: str,
- parameters: _models.Workspace,
- *,
- content_type: str = "application/json",
- **kwargs: Any
- ) -> AsyncLROPoller[_models.Workspace]:
- """Creates or updates a workspace with the specified parameters.
-
- :param resource_group_name: The name of the resource group. The name is case insensitive.
- Required.
- :type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
- :type workspace_name: str
- :param parameters: The parameters for creating or updating a machine learning workspace.
- Required.
- :type parameters: ~azure.mgmt.machinelearningservices.models.Workspace
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
- :return: An instance of AsyncLROPoller that returns either Workspace or the result of
- cls(response)
- :rtype:
- ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.Workspace]
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- async def begin_create_or_update(
- self,
- resource_group_name: str,
- workspace_name: str,
- parameters: IO,
- *,
- content_type: str = "application/json",
- **kwargs: Any
- ) -> AsyncLROPoller[_models.Workspace]:
- """Creates or updates a workspace with the specified parameters.
-
- :param resource_group_name: The name of the resource group. The name is case insensitive.
- Required.
- :type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
- :type workspace_name: str
- :param parameters: The parameters for creating or updating a machine learning workspace.
- Required.
- :type parameters: IO
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
- :return: An instance of AsyncLROPoller that returns either Workspace or the result of
- cls(response)
- :rtype:
- ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.Workspace]
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
@distributed_trace_async
- async def begin_create_or_update(
- self, resource_group_name: str, workspace_name: str, parameters: Union[_models.Workspace, IO], **kwargs: Any
- ) -> AsyncLROPoller[_models.Workspace]:
- """Creates or updates a workspace with the specified parameters.
+ async def begin_delete(
+ self, resource_group_name: str, workspace_name: str, force_to_purge: bool = False, **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Deletes a machine learning workspace.
+
+ Deletes a machine learning workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
- :param parameters: The parameters for creating or updating a machine learning workspace. Is
- either a Workspace type or a IO type. Required.
- :type parameters: ~azure.mgmt.machinelearningservices.models.Workspace or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
+ :param force_to_purge: Flag to indicate delete is a purge request. Default value is False.
+ :type force_to_purge: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
@@ -301,28 +340,24 @@ async def begin_create_or_update(
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
- :return: An instance of AsyncLROPoller that returns either Workspace or the result of
- cls(response)
- :rtype:
- ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.Workspace]
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.Workspace] = kwargs.pop("cls", None)
+ cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = await self._create_or_update_initial(
+ raw_result = await self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
workspace_name=workspace_name,
- parameters=parameters,
+ force_to_purge=force_to_purge,
api_version=api_version,
- content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
@@ -330,14 +365,14 @@ async def begin_create_or_update(
)
kwargs.pop("error_map", None)
- def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("Workspace", pipeline_response)
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, deserialized, {})
- return deserialized
+ return cls(pipeline_response, None, {})
if polling is True:
- polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
@@ -351,13 +386,26 @@ def get_long_running_output(pipeline_response):
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
- begin_create_or_update.metadata = {
+ begin_delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
}
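# Hypothetical usage sketch for begin_delete above, which now accepts
# force_to_purge and polls via the Location header. Names are placeholders;
# force_to_purge=True asks the service to purge (hard-delete) the workspace.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def delete_workspace() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            poller = await client.workspaces.begin_delete(
                resource_group_name="my-rg",
                workspace_name="my-workspace",
                force_to_purge=True,
            )
            await poller.result()  # completes with None once the delete finishes


asyncio.run(delete_workspace())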
- async def _delete_initial( # pylint: disable=inconsistent-return-statements
- self, resource_group_name: str, workspace_name: str, **kwargs: Any
- ) -> None:
+ @distributed_trace_async
+ async def get(self, resource_group_name: str, workspace_name: str, **kwargs: Any) -> _models.Workspace:
+ """Gets the properties of the specified machine learning workspace.
+
+ Gets the properties of the specified machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: Workspace or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.Workspace
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
@@ -370,14 +418,14 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[_models.Workspace] = kwargs.pop("cls", None)
- request = build_delete_request(
+ request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
+ template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
@@ -389,81 +437,21 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
request, stream=_stream, **kwargs
)
- response = pipeline_response.http_response
-
- if response.status_code not in [200, 202, 204]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
- raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
-
- if cls:
- return cls(pipeline_response, None, {})
-
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
- }
-
- @distributed_trace_async
- async def begin_delete(self, resource_group_name: str, workspace_name: str, **kwargs: Any) -> AsyncLROPoller[None]:
- """Deletes a machine learning workspace.
-
- :param resource_group_name: The name of the resource group. The name is case insensitive.
- Required.
- :type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
- :type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
- :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
- :rtype: ~azure.core.polling.AsyncLROPoller[None]
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- _headers = kwargs.pop("headers", {}) or {}
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
- polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
- lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
- cont_token: Optional[str] = kwargs.pop("continuation_token", None)
- if cont_token is None:
- raw_result = await self._delete_initial( # type: ignore
- resource_group_name=resource_group_name,
- workspace_name=workspace_name,
- api_version=api_version,
- cls=lambda x, y, z: x,
- headers=_headers,
- params=_params,
- **kwargs
- )
- kwargs.pop("error_map", None)
+ response = pipeline_response.http_response
- def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
- if cls:
- return cls(pipeline_response, None, {})
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if polling is True:
- polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
- elif polling is False:
- polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
- else:
- polling_method = polling
- if cont_token:
- return AsyncLROPoller.from_continuation_token(
- polling_method=polling_method,
- continuation_token=cont_token,
- client=self._client,
- deserialization_callback=get_long_running_output,
- )
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+ deserialized = self._deserialize("Workspace", pipeline_response)
- begin_delete.metadata = {
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
}
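# Hypothetical usage sketch for the async get operation above; names are placeholders.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def show_workspace() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            ws = await client.workspaces.get("my-rg", "my-workspace")
            print(ws.id, ws.location, ws.provisioning_state)


asyncio.run(show_workspace())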
@@ -471,7 +459,7 @@ async def _update_initial(
self,
resource_group_name: str,
workspace_name: str,
- parameters: Union[_models.WorkspaceUpdateParameters, IO],
+ body: Union[_models.WorkspaceUpdateParameters, IO],
**kwargs: Any
) -> Optional[_models.Workspace]:
error_map = {
@@ -492,10 +480,10 @@ async def _update_initial(
content_type = content_type or "application/json"
_json = None
_content = None
- if isinstance(parameters, (IOBase, bytes)):
- _content = parameters
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
else:
- _json = self._serialize.body(parameters, "WorkspaceUpdateParameters")
+ _json = self._serialize.body(body, "WorkspaceUpdateParameters")
request = build_update_request(
resource_group_name=resource_group_name,
@@ -542,20 +530,22 @@ async def begin_update(
self,
resource_group_name: str,
workspace_name: str,
- parameters: _models.WorkspaceUpdateParameters,
+ body: _models.WorkspaceUpdateParameters,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.Workspace]:
"""Updates a machine learning workspace with the specified parameters.
+ Updates a machine learning workspace with the specified parameters.
+
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
- :param parameters: The parameters for updating a machine learning workspace. Required.
- :type parameters: ~azure.mgmt.machinelearningservices.models.WorkspaceUpdateParameters
+ :param body: The parameters for updating a machine learning workspace. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.WorkspaceUpdateParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
@@ -579,20 +569,22 @@ async def begin_update(
self,
resource_group_name: str,
workspace_name: str,
- parameters: IO,
+ body: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.Workspace]:
"""Updates a machine learning workspace with the specified parameters.
+ Updates a machine learning workspace with the specified parameters.
+
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
- :param parameters: The parameters for updating a machine learning workspace. Required.
- :type parameters: IO
+ :param body: The parameters for updating a machine learning workspace. Required.
+ :type body: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
@@ -616,19 +608,21 @@ async def begin_update(
self,
resource_group_name: str,
workspace_name: str,
- parameters: Union[_models.WorkspaceUpdateParameters, IO],
+ body: Union[_models.WorkspaceUpdateParameters, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.Workspace]:
"""Updates a machine learning workspace with the specified parameters.
+ Updates a machine learning workspace with the specified parameters.
+
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
- :param parameters: The parameters for updating a machine learning workspace. Is either a
+ :param body: The parameters for updating a machine learning workspace. Is either a
WorkspaceUpdateParameters type or a IO type. Required.
- :type parameters: ~azure.mgmt.machinelearningservices.models.WorkspaceUpdateParameters or IO
+ :type body: ~azure.mgmt.machinelearningservices.models.WorkspaceUpdateParameters or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
@@ -659,7 +653,7 @@ async def begin_update(
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
- parameters=parameters,
+ body=body,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
@@ -694,104 +688,243 @@ def get_long_running_output(pipeline_response):
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
}
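# Hypothetical usage sketch for begin_update above, whose request parameter is now
# named `body`. Assumes `description` and `tags` are among the patchable fields of
# WorkspaceUpdateParameters; names are placeholders.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices import models
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def update_workspace() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            poller = await client.workspaces.begin_update(
                resource_group_name="my-rg",
                workspace_name="my-workspace",
                body=models.WorkspaceUpdateParameters(
                    description="Updated via the async SDK",
                    tags={"env": "dev"},
                ),
            )
            updated = await poller.result()
            print(updated.description)


asyncio.run(update_workspace())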
- @distributed_trace
- def list_by_resource_group(
- self, resource_group_name: str, skip: Optional[str] = None, **kwargs: Any
- ) -> AsyncIterable["_models.Workspace"]:
- """Lists all the available machine learning workspaces under the specified resource group.
+ async def _create_or_update_initial(
+ self, resource_group_name: str, workspace_name: str, body: Union[_models.Workspace, IO], **kwargs: Any
+ ) -> Optional[_models.Workspace]:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Optional[_models.Workspace]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "Workspace")
+
+ request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._create_or_update_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("Workspace", pipeline_response)
+
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+
+ _create_or_update_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
+ }
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ body: _models.Workspace,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.Workspace]:
+ """Creates or updates a workspace with the specified parameters.
+
+ Creates or updates a workspace with the specified parameters.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param skip: Continuation token for pagination. Default value is None.
- :type skip: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param body: The parameters for creating or updating a machine learning workspace. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.Workspace
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
- :return: An iterator like instance of either Workspace or the result of cls(response)
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either Workspace or the result of
+ cls(response)
:rtype:
- ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.Workspace]
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.Workspace]
:raises ~azure.core.exceptions.HttpResponseError:
"""
- _headers = kwargs.pop("headers", {}) or {}
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[_models.WorkspaceListResult] = kwargs.pop("cls", None)
- error_map = {
- 401: ClientAuthenticationError,
- 404: ResourceNotFoundError,
- 409: ResourceExistsError,
- 304: ResourceNotModifiedError,
- }
- error_map.update(kwargs.pop("error_map", {}) or {})
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.Workspace]:
+ """Creates or updates a workspace with the specified parameters.
- def prepare_request(next_link=None):
- if not next_link:
+ Creates or updates a workspace with the specified parameters.
- request = build_list_by_resource_group_request(
- resource_group_name=resource_group_name,
- subscription_id=self._config.subscription_id,
- skip=skip,
- api_version=api_version,
- template_url=self.list_by_resource_group.metadata["url"],
- headers=_headers,
- params=_params,
- )
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param body: The parameters for creating or updating a machine learning workspace. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either Workspace or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.Workspace]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
- else:
- # make call to next link with the client's api-version
- _parsed_next_link = urllib.parse.urlparse(next_link)
- _next_request_params = case_insensitive_dict(
- {
- key: [urllib.parse.quote(v) for v in value]
- for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
- }
- )
- _next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
- "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
- )
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ @distributed_trace_async
+ async def begin_create_or_update(
+ self, resource_group_name: str, workspace_name: str, body: Union[_models.Workspace, IO], **kwargs: Any
+ ) -> AsyncLROPoller[_models.Workspace]:
+ """Creates or updates a workspace with the specified parameters.
- async def extract_data(pipeline_response):
- deserialized = self._deserialize("WorkspaceListResult", pipeline_response)
- list_of_elem = deserialized.value
- if cls:
- list_of_elem = cls(list_of_elem) # type: ignore
- return deserialized.next_link or None, AsyncList(list_of_elem)
+ Creates or updates a workspace with the specified parameters.
- async def get_next(next_link=None):
- request = prepare_request(next_link)
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param body: The parameters for creating or updating a machine learning workspace. Is either a
+ Workspace type or an IO type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.Workspace or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either Workspace or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.Workspace]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- _stream = False
- pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.Workspace] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
)
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
- raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+ kwargs.pop("error_map", None)
- return pipeline_response
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("Workspace", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
- return AsyncItemPaged(get_next, extract_data)
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
- list_by_resource_group.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces"
+ begin_create_or_update.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
}
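# Hypothetical usage sketch for begin_create_or_update above (parameter renamed to
# `body`, final state now read from the Location header). A real workspace create
# also needs an identity and dependent resources (storage account, key vault, etc.);
# the Workspace payload below is a deliberately minimal, illustrative stub.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices import models
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def create_workspace() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            poller = await client.workspaces.begin_create_or_update(
                resource_group_name="my-rg",
                workspace_name="my-workspace",
                body=models.Workspace(
                    location="eastus",
                    description="example workspace",
                ),
            )
            ws = await poller.result()
            print(ws.id)


asyncio.run(create_workspace())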
async def _diagnose_initial(
self,
resource_group_name: str,
workspace_name: str,
- parameters: Optional[Union[_models.DiagnoseWorkspaceParameters, IO]] = None,
+ body: Optional[Union[_models.DiagnoseWorkspaceParameters, IO]] = None,
**kwargs: Any
) -> Optional[_models.DiagnoseResponseResult]:
error_map = {
@@ -812,11 +945,11 @@ async def _diagnose_initial(
content_type = content_type or "application/json"
_json = None
_content = None
- if isinstance(parameters, (IOBase, bytes)):
- _content = parameters
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
else:
- if parameters is not None:
- _json = self._serialize.body(parameters, "DiagnoseWorkspaceParameters")
+ if body is not None:
+ _json = self._serialize.body(body, "DiagnoseWorkspaceParameters")
else:
_json = None
@@ -870,7 +1003,7 @@ async def begin_diagnose(
self,
resource_group_name: str,
workspace_name: str,
- parameters: Optional[_models.DiagnoseWorkspaceParameters] = None,
+ body: Optional[_models.DiagnoseWorkspaceParameters] = None,
*,
content_type: str = "application/json",
**kwargs: Any
@@ -882,10 +1015,10 @@ async def begin_diagnose(
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
- :param parameters: The parameter of diagnosing workspace health. Default value is None.
- :type parameters: ~azure.mgmt.machinelearningservices.models.DiagnoseWorkspaceParameters
+ :param body: The parameter of diagnosing workspace health. Default value is None.
+ :type body: ~azure.mgmt.machinelearningservices.models.DiagnoseWorkspaceParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
@@ -909,7 +1042,7 @@ async def begin_diagnose(
self,
resource_group_name: str,
workspace_name: str,
- parameters: Optional[IO] = None,
+ body: Optional[IO] = None,
*,
content_type: str = "application/json",
**kwargs: Any
@@ -921,10 +1054,10 @@ async def begin_diagnose(
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
- :param parameters: The parameter of diagnosing workspace health. Default value is None.
- :type parameters: IO
+ :param body: The parameter of diagnosing workspace health. Default value is None.
+ :type body: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
@@ -948,7 +1081,7 @@ async def begin_diagnose(
self,
resource_group_name: str,
workspace_name: str,
- parameters: Optional[Union[_models.DiagnoseWorkspaceParameters, IO]] = None,
+ body: Optional[Union[_models.DiagnoseWorkspaceParameters, IO]] = None,
**kwargs: Any
) -> AsyncLROPoller[_models.DiagnoseResponseResult]:
"""Diagnose workspace setup issue.
@@ -958,11 +1091,11 @@ async def begin_diagnose(
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
- :param parameters: The parameter of diagnosing workspace health. Is either a
+ :param body: The parameter of diagnosing workspace health. Is either a
DiagnoseWorkspaceParameters type or a IO type. Default value is None.
- :type parameters: ~azure.mgmt.machinelearningservices.models.DiagnoseWorkspaceParameters or IO
+ :type body: ~azure.mgmt.machinelearningservices.models.DiagnoseWorkspaceParameters or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
@@ -993,7 +1126,7 @@ async def begin_diagnose(
raw_result = await self._diagnose_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
- parameters=parameters,
+ body=body,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
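# Hypothetical usage sketch for begin_diagnose above (request parameter renamed to
# `body`, which may be omitted to run the default diagnostics). Names are placeholders.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def diagnose_workspace() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            poller = await client.workspaces.begin_diagnose("my-rg", "my-workspace")
            result = await poller.result()  # DiagnoseResponseResult
            print(result)


asyncio.run(diagnose_workspace())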
@@ -1037,10 +1170,13 @@ async def list_keys(
"""Lists all the keys associated with this workspace. This includes keys for the storage account,
app insights and password for container registry.
+ Lists all the keys associated with this workspace. This includes keys for the storage account,
+ app insights and password for container registry.
+
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ListWorkspaceKeysResult or the result of cls(response)
@@ -1096,9 +1232,24 @@ async def list_keys(
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listKeys"
}
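# Hypothetical usage sketch for list_keys above, which returns the storage,
# Application Insights and container registry secrets for the workspace.
# Names are placeholders; the result is dumped generically rather than assuming
# specific attribute names, and its values should be treated as secrets.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def show_keys() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            keys = await client.workspaces.list_keys("my-rg", "my-workspace")
            print(keys.as_dict())  # ListWorkspaceKeysResult serialized to a dict


asyncio.run(show_keys())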
- async def _resync_keys_initial( # pylint: disable=inconsistent-return-statements
+ @distributed_trace_async
+ async def list_notebook_access_token(
self, resource_group_name: str, workspace_name: str, **kwargs: Any
- ) -> None:
+ ) -> _models.NotebookAccessTokenResult:
+ """Get Azure Machine Learning Workspace notebook access token.
+
+ Get Azure Machine Learning Workspace notebook access token.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: NotebookAccessTokenResult or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.NotebookAccessTokenResult
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
@@ -1111,14 +1262,14 @@ async def _resync_keys_initial( # pylint: disable=inconsistent-return-statement
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[_models.NotebookAccessTokenResult] = kwargs.pop("cls", None)
- request = build_resync_keys_request(
+ request = build_list_notebook_access_token_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._resync_keys_initial.metadata["url"],
+ template_url=self.list_notebook_access_token.metadata["url"],
headers=_headers,
params=_params,
)
@@ -1132,103 +1283,107 @@ async def _resync_keys_initial( # pylint: disable=inconsistent-return-statement
response = pipeline_response.http_response
- if response.status_code not in [200, 202]:
+ if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+ deserialized = self._deserialize("NotebookAccessTokenResult", pipeline_response)
+
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, deserialized, {})
- _resync_keys_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/resyncKeys"
+ return deserialized
+
+ list_notebook_access_token.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listNotebookAccessToken"
}
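# Hypothetical usage sketch for list_notebook_access_token above; names are
# placeholders, and the NotebookAccessTokenResult is dumped generically rather
# than assuming specific field names.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def show_notebook_token() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            token = await client.workspaces.list_notebook_access_token("my-rg", "my-workspace")
            print(token.as_dict())


asyncio.run(show_notebook_token())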
@distributed_trace_async
- async def begin_resync_keys(
+ async def list_notebook_keys(
self, resource_group_name: str, workspace_name: str, **kwargs: Any
- ) -> AsyncLROPoller[None]:
- """Resync all the keys associated with this workspace. This includes keys for the storage account,
- app insights and password for container registry.
+ ) -> _models.ListNotebookKeysResult:
+ """Lists keys of Azure Machine Learning Workspaces notebook.
+
+ Lists keys of Azure Machine Learning Workspaces notebook.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
- :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
- :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :return: ListNotebookKeysResult or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.ListNotebookKeysResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
- polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
- lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
- cont_token: Optional[str] = kwargs.pop("continuation_token", None)
- if cont_token is None:
- raw_result = await self._resync_keys_initial( # type: ignore
- resource_group_name=resource_group_name,
- workspace_name=workspace_name,
- api_version=api_version,
- cls=lambda x, y, z: x,
- headers=_headers,
- params=_params,
- **kwargs
- )
- kwargs.pop("error_map", None)
+ cls: ClsType[_models.ListNotebookKeysResult] = kwargs.pop("cls", None)
- def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
- if cls:
- return cls(pipeline_response, None, {})
+ request = build_list_notebook_keys_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.list_notebook_keys.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
- if polling is True:
- polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
- elif polling is False:
- polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
- else:
- polling_method = polling
- if cont_token:
- return AsyncLROPoller.from_continuation_token(
- polling_method=polling_method,
- continuation_token=cont_token,
- client=self._client,
- deserialization_callback=get_long_running_output,
- )
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
- begin_resync_keys.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/resyncKeys"
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("ListNotebookKeysResult", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ list_notebook_keys.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listNotebookKeys"
}
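# --- Editor's sketch (not part of the generated diff): the `cls` keyword documented above is
# invoked as cls(pipeline_response, deserialized, response_headers), so a callable can be passed
# to receive the raw pipeline response alongside the ListNotebookKeysResult. The client is assumed
# to be a MachineLearningServicesMgmtClient constructed as in the earlier sketch; the resource
# names are placeholders.
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def notebook_keys_with_status(client: MachineLearningServicesMgmtClient):
    keys, status = await client.workspaces.list_notebook_keys(
        resource_group_name="<my-resource-group>",
        workspace_name="<my-workspace>",
        # Mirrors the cls(pipeline_response, deserialized, {}) call in the operation body above.
        cls=lambda pipeline_response, deserialized, headers: (
            deserialized,
            pipeline_response.http_response.status_code,
        ),
    )
    return keys, status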
- @distributed_trace
- def list_by_subscription(self, skip: Optional[str] = None, **kwargs: Any) -> AsyncIterable["_models.Workspace"]:
- """Lists all the available machine learning workspaces under the specified subscription.
+ @distributed_trace_async
+ async def list_storage_account_keys(
+ self, resource_group_name: str, workspace_name: str, **kwargs: Any
+ ) -> _models.ListStorageAccountKeysResult:
+ """Lists keys of Azure Machine Learning Workspace's storage account.
- :param skip: Continuation token for pagination. Default value is None.
- :type skip: str
+ Lists keys of Azure Machine Learning Workspace's storage account.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
- :return: An iterator like instance of either Workspace or the result of cls(response)
- :rtype:
- ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.Workspace]
+ :return: ListStorageAccountKeysResult or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.ListStorageAccountKeysResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
- _headers = kwargs.pop("headers", {}) or {}
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[_models.WorkspaceListResult] = kwargs.pop("cls", None)
-
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
@@ -1237,81 +1392,65 @@ def list_by_subscription(self, skip: Optional[str] = None, **kwargs: Any) -> Asy
}
error_map.update(kwargs.pop("error_map", {}) or {})
- def prepare_request(next_link=None):
- if not next_link:
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- request = build_list_by_subscription_request(
- subscription_id=self._config.subscription_id,
- skip=skip,
- api_version=api_version,
- template_url=self.list_by_subscription.metadata["url"],
- headers=_headers,
- params=_params,
- )
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.ListStorageAccountKeysResult] = kwargs.pop("cls", None)
- else:
- # make call to next link with the client's api-version
- _parsed_next_link = urllib.parse.urlparse(next_link)
- _next_request_params = case_insensitive_dict(
- {
- key: [urllib.parse.quote(v) for v in value]
- for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
- }
- )
- _next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
- "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
- )
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ request = build_list_storage_account_keys_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.list_storage_account_keys.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
- async def extract_data(pipeline_response):
- deserialized = self._deserialize("WorkspaceListResult", pipeline_response)
- list_of_elem = deserialized.value
- if cls:
- list_of_elem = cls(list_of_elem) # type: ignore
- return deserialized.next_link or None, AsyncList(list_of_elem)
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
- async def get_next(next_link=None):
- request = prepare_request(next_link)
+ response = pipeline_response.http_response
- _stream = False
- pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
- )
- response = pipeline_response.http_response
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
- raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+ deserialized = self._deserialize("ListStorageAccountKeysResult", pipeline_response)
- return pipeline_response
+ if cls:
+ return cls(pipeline_response, deserialized, {})
- return AsyncItemPaged(get_next, extract_data)
+ return deserialized
- list_by_subscription.metadata = {
- "url": "/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/workspaces"
+ list_storage_account_keys.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listStorageAccountKeys"
}
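# --- Editor's sketch (not part of the generated diff): retrieving the workspace storage account
# keys via the operation above. Client construction is assumed to follow the first sketch; the
# resource names are placeholders.
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def show_storage_account_keys(client: MachineLearningServicesMgmtClient) -> None:
    keys = await client.workspaces.list_storage_account_keys(
        resource_group_name="<my-resource-group>",
        workspace_name="<my-workspace>",
    )
    # ListStorageAccountKeysResult is an msrest model; as_dict() gives a plain-dict view of it.
    print(keys.as_dict())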
@distributed_trace_async
- async def list_notebook_access_token(
+ async def list_outbound_network_dependencies_endpoints(
self, resource_group_name: str, workspace_name: str, **kwargs: Any
- ) -> _models.NotebookAccessTokenResult:
- """return notebook access token and refresh token.
+ ) -> _models.ExternalFQDNResponse:
+ """Called by Client (Portal, CLI, etc) to get a list of all external outbound dependencies (FQDNs)
+ programmatically.
+
+ Called by Client (Portal, CLI, etc) to get a list of all external outbound dependencies (FQDNs)
+ programmatically.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
- :return: NotebookAccessTokenResult or the result of cls(response)
- :rtype: ~azure.mgmt.machinelearningservices.models.NotebookAccessTokenResult
+ :return: ExternalFQDNResponse or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.ExternalFQDNResponse
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
@@ -1326,14 +1465,14 @@ async def list_notebook_access_token(
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[_models.NotebookAccessTokenResult] = kwargs.pop("cls", None)
+ cls: ClsType[_models.ExternalFQDNResponse] = kwargs.pop("cls", None)
- request = build_list_notebook_access_token_request(
+ request = build_list_outbound_network_dependencies_endpoints_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list_notebook_access_token.metadata["url"],
+ template_url=self.list_outbound_network_dependencies_endpoints.metadata["url"],
headers=_headers,
params=_params,
)
@@ -1352,15 +1491,15 @@ async def list_notebook_access_token(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("NotebookAccessTokenResult", pipeline_response)
+ deserialized = self._deserialize("ExternalFQDNResponse", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
- list_notebook_access_token.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listNotebookAccessToken"
+ list_outbound_network_dependencies_endpoints.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundNetworkDependenciesEndpoints"
}
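# --- Editor's sketch (not part of the generated diff): listing the workspace's outbound FQDN
# dependencies via the operation above. The client is assumed to be constructed as in the first
# sketch; the names are placeholders, and the response is dumped via as_dict() rather than
# walking individual model attributes.
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def show_outbound_fqdn_dependencies(client: MachineLearningServicesMgmtClient) -> None:
    fqdn_response = await client.workspaces.list_outbound_network_dependencies_endpoints(
        resource_group_name="<my-resource-group>",
        workspace_name="<my-workspace>",
    )
    # ExternalFQDNResponse deserialized from the 200 response, as in the operation body above.
    print(fqdn_response.as_dict())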
async def _prepare_notebook_initial(
@@ -1405,11 +1544,16 @@ async def _prepare_notebook_initial(
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
+ response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("NotebookResourceInfo", pipeline_response)
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, response_headers)
return deserialized
@@ -1421,12 +1565,14 @@ async def _prepare_notebook_initial(
async def begin_prepare_notebook(
self, resource_group_name: str, workspace_name: str, **kwargs: Any
) -> AsyncLROPoller[_models.NotebookResourceInfo]:
- """Prepare a notebook.
+ """Prepare Azure Machine Learning Workspace's notebook resource.
+
+ Prepare Azure Machine Learning Workspace's notebook resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
@@ -1489,87 +1635,9 @@ def get_long_running_output(pipeline_response):
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/prepareNotebook"
}
- @distributed_trace_async
- async def list_storage_account_keys(
- self, resource_group_name: str, workspace_name: str, **kwargs: Any
- ) -> _models.ListStorageAccountKeysResult:
- """List storage account keys of a workspace.
-
- :param resource_group_name: The name of the resource group. The name is case insensitive.
- Required.
- :type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
- :type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: ListStorageAccountKeysResult or the result of cls(response)
- :rtype: ~azure.mgmt.machinelearningservices.models.ListStorageAccountKeysResult
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- error_map = {
- 401: ClientAuthenticationError,
- 404: ResourceNotFoundError,
- 409: ResourceExistsError,
- 304: ResourceNotModifiedError,
- }
- error_map.update(kwargs.pop("error_map", {}) or {})
-
- _headers = kwargs.pop("headers", {}) or {}
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[_models.ListStorageAccountKeysResult] = kwargs.pop("cls", None)
-
- request = build_list_storage_account_keys_request(
- resource_group_name=resource_group_name,
- workspace_name=workspace_name,
- subscription_id=self._config.subscription_id,
- api_version=api_version,
- template_url=self.list_storage_account_keys.metadata["url"],
- headers=_headers,
- params=_params,
- )
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
-
- _stream = False
- pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
- )
-
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
- raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
-
- deserialized = self._deserialize("ListStorageAccountKeysResult", pipeline_response)
-
- if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
-
- list_storage_account_keys.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listStorageAccountKeys"
- }
-
- @distributed_trace_async
- async def list_notebook_keys(
+ async def _resync_keys_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, workspace_name: str, **kwargs: Any
- ) -> _models.ListNotebookKeysResult:
- """List keys of a notebook.
-
- :param resource_group_name: The name of the resource group. The name is case insensitive.
- Required.
- :type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
- :type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: ListNotebookKeysResult or the result of cls(response)
- :rtype: ~azure.mgmt.machinelearningservices.models.ListNotebookKeysResult
- :raises ~azure.core.exceptions.HttpResponseError:
- """
+ ) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
@@ -1582,14 +1650,14 @@ async def list_notebook_keys(
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[_models.ListNotebookKeysResult] = kwargs.pop("cls", None)
+ cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_list_notebook_keys_request(
+ request = build_resync_keys_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list_notebook_keys.metadata["url"],
+ template_url=self._resync_keys_initial.metadata["url"],
headers=_headers,
params=_params,
)
@@ -1603,87 +1671,91 @@ async def list_notebook_keys(
response = pipeline_response.http_response
- if response.status_code not in [200]:
+ if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ListNotebookKeysResult", pipeline_response)
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, None, response_headers)
- list_notebook_keys.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listNotebookKeys"
+ _resync_keys_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/resyncKeys"
}
@distributed_trace_async
- async def list_outbound_network_dependencies_endpoints(
+ async def begin_resync_keys(
self, resource_group_name: str, workspace_name: str, **kwargs: Any
- ) -> _models.ExternalFQDNResponse:
- """Called by Client (Portal, CLI, etc) to get a list of all external outbound dependencies (FQDNs)
- programmatically.
+ ) -> AsyncLROPoller[None]:
+ """Resync all the keys associated with this workspace.This includes keys for the storage account,
+ app insights and password for container registry.
- Called by Client (Portal, CLI, etc) to get a list of all external outbound dependencies (FQDNs)
- programmatically.
+ Resync all the keys associated with this workspace. This includes keys for the storage account,
+ app insights and password for container registry.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
- :return: ExternalFQDNResponse or the result of cls(response)
- :rtype: ~azure.mgmt.machinelearningservices.models.ExternalFQDNResponse
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
- 401: ClientAuthenticationError,
- 404: ResourceNotFoundError,
- 409: ResourceExistsError,
- 304: ResourceNotModifiedError,
- }
- error_map.update(kwargs.pop("error_map", {}) or {})
-
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[_models.ExternalFQDNResponse] = kwargs.pop("cls", None)
-
- request = build_list_outbound_network_dependencies_endpoints_request(
- resource_group_name=resource_group_name,
- workspace_name=workspace_name,
- subscription_id=self._config.subscription_id,
- api_version=api_version,
- template_url=self.list_outbound_network_dependencies_endpoints.metadata["url"],
- headers=_headers,
- params=_params,
- )
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
-
- _stream = False
- pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
- )
-
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
- raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
-
- deserialized = self._deserialize("ExternalFQDNResponse", pipeline_response)
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._resync_keys_initial( # type: ignore
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
- if cls:
- return cls(pipeline_response, deserialized, {})
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {})
- return deserialized
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
- list_outbound_network_dependencies_endpoints.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundNetworkDependenciesEndpoints"
+ begin_resync_keys.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/resyncKeys"
}
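# --- Editor's sketch (not part of the generated diff): the resync-keys LRO above returns
# AsyncLROPoller[None]; per the polling setup in this diff it follows the 202 Location header
# ("final-state-via": "location") and yields no body. Client construction follows the first
# sketch; the resource names are placeholders.
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def resync_workspace_keys(client: MachineLearningServicesMgmtClient) -> None:
    poller = await client.workspaces.begin_resync_keys(
        resource_group_name="<my-resource-group>",
        workspace_name="<my-workspace>",
    )
    await poller.result()  # completes with None; raises HttpResponseError on failure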
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/__init__.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/__init__.py
index 339c533836e3..9da599764fb3 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/__init__.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/__init__.py
@@ -9,23 +9,24 @@
from ._models_py3 import AKS
from ._models_py3 import AKSSchema
from ._models_py3 import AKSSchemaProperties
+from ._models_py3 import AccessKeyAuthTypeWorkspaceConnectionProperties
from ._models_py3 import AccountKeyDatastoreCredentials
from ._models_py3 import AccountKeyDatastoreSecrets
from ._models_py3 import AcrDetails
from ._models_py3 import AksComputeSecrets
from ._models_py3 import AksComputeSecretsProperties
from ._models_py3 import AksNetworkingConfiguration
+from ._models_py3 import AllFeatures
from ._models_py3 import AllNodes
from ._models_py3 import AmlCompute
from ._models_py3 import AmlComputeNodeInformation
from ._models_py3 import AmlComputeNodesInformation
from ._models_py3 import AmlComputeProperties
from ._models_py3 import AmlComputeSchema
-from ._models_py3 import AmlOperation
-from ._models_py3 import AmlOperationDisplay
-from ._models_py3 import AmlOperationListResult
from ._models_py3 import AmlToken
+from ._models_py3 import AmlTokenComputeIdentity
from ._models_py3 import AmlUserFeature
+from ._models_py3 import ApiKeyAuthWorkspaceConnectionProperties
from ._models_py3 import ArmResourceId
from ._models_py3 import AssetBase
from ._models_py3 import AssetContainer
@@ -33,6 +34,7 @@
from ._models_py3 import AssetJobOutput
from ._models_py3 import AssetReferenceBase
from ._models_py3 import AssignedUser
+from ._models_py3 import AutoDeleteSetting
from ._models_py3 import AutoForecastHorizon
from ._models_py3 import AutoMLJob
from ._models_py3 import AutoMLVertical
@@ -42,28 +44,45 @@
from ._models_py3 import AutoSeasonality
from ._models_py3 import AutoTargetLags
from ._models_py3 import AutoTargetRollingWindowSize
+from ._models_py3 import AutologgerSettings
from ._models_py3 import AzureBlobDatastore
from ._models_py3 import AzureDataLakeGen1Datastore
from ._models_py3 import AzureDataLakeGen2Datastore
+from ._models_py3 import AzureDatastore
+from ._models_py3 import AzureDevOpsWebhook
from ._models_py3 import AzureFileDatastore
+from ._models_py3 import AzureMLBatchInferencingServer
+from ._models_py3 import AzureMLOnlineInferencingServer
from ._models_py3 import BanditPolicy
+from ._models_py3 import BaseEnvironmentId
+from ._models_py3 import BaseEnvironmentSource
from ._models_py3 import BatchDeployment
+from ._models_py3 import BatchDeploymentConfiguration
from ._models_py3 import BatchDeploymentProperties
from ._models_py3 import BatchDeploymentTrackedResourceArmPaginatedResult
from ._models_py3 import BatchEndpoint
from ._models_py3 import BatchEndpointDefaults
from ._models_py3 import BatchEndpointProperties
from ._models_py3 import BatchEndpointTrackedResourceArmPaginatedResult
+from ._models_py3 import BatchPipelineComponentDeploymentConfiguration
from ._models_py3 import BatchRetrySettings
from ._models_py3 import BayesianSamplingAlgorithm
from ._models_py3 import BindOptions
from ._models_py3 import BlobReferenceForConsumptionDto
from ._models_py3 import BuildContext
+from ._models_py3 import CapacityReservationGroup
+from ._models_py3 import CapacityReservationGroupProperties
+from ._models_py3 import CapacityReservationGroupTrackedResourceArmPaginatedResult
+from ._models_py3 import CategoricalDataDriftMetricThreshold
+from ._models_py3 import CategoricalDataQualityMetricThreshold
+from ._models_py3 import CategoricalPredictionDriftMetricThreshold
from ._models_py3 import CertificateDatastoreCredentials
from ._models_py3 import CertificateDatastoreSecrets
from ._models_py3 import Classification
+from ._models_py3 import ClassificationModelPerformanceMetricThreshold
from ._models_py3 import ClassificationTrainingSettings
from ._models_py3 import ClusterUpdateParameters
+from ._models_py3 import CocoExportSummary
from ._models_py3 import CodeConfiguration
from ._models_py3 import CodeContainer
from ._models_py3 import CodeContainerProperties
@@ -71,9 +90,11 @@
from ._models_py3 import CodeVersion
from ._models_py3 import CodeVersionProperties
from ._models_py3 import CodeVersionResourceArmPaginatedResult
+from ._models_py3 import Collection
from ._models_py3 import ColumnTransformer
from ._models_py3 import CommandJob
from ._models_py3 import CommandJobLimits
+from ._models_py3 import ComponentConfiguration
from ._models_py3 import ComponentContainer
from ._models_py3 import ComponentContainerProperties
from ._models_py3 import ComponentContainerResourceArmPaginatedResult
@@ -83,6 +104,7 @@
from ._models_py3 import Compute
from ._models_py3 import ComputeInstance
from ._models_py3 import ComputeInstanceApplication
+from ._models_py3 import ComputeInstanceAutologgerSettings
from ._models_py3 import ComputeInstanceConnectivityEndpoints
from ._models_py3 import ComputeInstanceContainer
from ._models_py3 import ComputeInstanceCreatedBy
@@ -94,40 +116,58 @@
from ._models_py3 import ComputeInstanceSchema
from ._models_py3 import ComputeInstanceSshSettings
from ._models_py3 import ComputeInstanceVersion
+from ._models_py3 import ComputeRecurrenceSchedule
from ._models_py3 import ComputeResource
from ._models_py3 import ComputeResourceSchema
+from ._models_py3 import ComputeRuntimeDto
from ._models_py3 import ComputeSchedules
from ._models_py3 import ComputeSecrets
from ._models_py3 import ComputeStartStopSchedule
from ._models_py3 import ContainerResourceRequirements
from ._models_py3 import ContainerResourceSettings
from ._models_py3 import CosmosDbSettings
+from ._models_py3 import CreateMonitorAction
from ._models_py3 import Cron
from ._models_py3 import CronTrigger
+from ._models_py3 import CsvExportSummary
from ._models_py3 import CustomForecastHorizon
+from ._models_py3 import CustomInferencingServer
+from ._models_py3 import CustomKeys
+from ._models_py3 import CustomKeysWorkspaceConnectionProperties
+from ._models_py3 import CustomMetricThreshold
from ._models_py3 import CustomModelJobInput
from ._models_py3 import CustomModelJobOutput
+from ._models_py3 import CustomMonitoringSignal
from ._models_py3 import CustomNCrossValidations
from ._models_py3 import CustomSeasonality
from ._models_py3 import CustomService
from ._models_py3 import CustomTargetLags
from ._models_py3 import CustomTargetRollingWindowSize
+from ._models_py3 import DataCollector
from ._models_py3 import DataContainer
from ._models_py3 import DataContainerProperties
from ._models_py3 import DataContainerResourceArmPaginatedResult
+from ._models_py3 import DataDriftMetricThresholdBase
+from ._models_py3 import DataDriftMonitoringSignal
from ._models_py3 import DataFactory
+from ._models_py3 import DataImport
+from ._models_py3 import DataImportSource
from ._models_py3 import DataLakeAnalytics
from ._models_py3 import DataLakeAnalyticsSchema
from ._models_py3 import DataLakeAnalyticsSchemaProperties
from ._models_py3 import DataPathAssetReference
+from ._models_py3 import DataQualityMetricThresholdBase
+from ._models_py3 import DataQualityMonitoringSignal
from ._models_py3 import DataVersionBase
from ._models_py3 import DataVersionBaseProperties
from ._models_py3 import DataVersionBaseResourceArmPaginatedResult
+from ._models_py3 import DatabaseSource
from ._models_py3 import Databricks
from ._models_py3 import DatabricksComputeSecrets
from ._models_py3 import DatabricksComputeSecretsProperties
from ._models_py3 import DatabricksProperties
from ._models_py3 import DatabricksSchema
+from ._models_py3 import DatasetExportSummary
from ._models_py3 import Datastore
from ._models_py3 import DatastoreCredentials
from ._models_py3 import DatastoreProperties
@@ -145,8 +185,9 @@
from ._models_py3 import DistributionConfiguration
from ._models_py3 import Docker
from ._models_py3 import EarlyTerminationPolicy
-from ._models_py3 import EncryptionKeyVaultProperties
+from ._models_py3 import EncryptionKeyVaultUpdateProperties
from ._models_py3 import EncryptionProperty
+from ._models_py3 import EncryptionUpdateProperties
from ._models_py3 import Endpoint
from ._models_py3 import EndpointAuthKeys
from ._models_py3 import EndpointAuthToken
@@ -165,21 +206,54 @@
from ._models_py3 import ErrorResponse
from ._models_py3 import EstimatedVMPrice
from ._models_py3 import EstimatedVMPrices
+from ._models_py3 import ExportSummary
from ._models_py3 import ExternalFQDNResponse
from ._models_py3 import FQDNEndpoint
from ._models_py3 import FQDNEndpointDetail
from ._models_py3 import FQDNEndpoints
-from ._models_py3 import FQDNEndpointsProperties
+from ._models_py3 import FQDNEndpointsPropertyBag
+from ._models_py3 import Feature
+from ._models_py3 import FeatureAttributionDriftMonitoringSignal
+from ._models_py3 import FeatureAttributionMetricThreshold
+from ._models_py3 import FeatureImportanceSettings
+from ._models_py3 import FeatureProperties
+from ._models_py3 import FeatureResourceArmPaginatedResult
+from ._models_py3 import FeatureStoreSettings
+from ._models_py3 import FeatureSubset
+from ._models_py3 import FeatureWindow
+from ._models_py3 import FeaturesetContainer
+from ._models_py3 import FeaturesetContainerProperties
+from ._models_py3 import FeaturesetContainerResourceArmPaginatedResult
+from ._models_py3 import FeaturesetSpecification
+from ._models_py3 import FeaturesetVersion
+from ._models_py3 import FeaturesetVersionBackfillRequest
+from ._models_py3 import FeaturesetVersionBackfillResponse
+from ._models_py3 import FeaturesetVersionProperties
+from ._models_py3 import FeaturesetVersionResourceArmPaginatedResult
+from ._models_py3 import FeaturestoreEntityContainer
+from ._models_py3 import FeaturestoreEntityContainerProperties
+from ._models_py3 import FeaturestoreEntityContainerResourceArmPaginatedResult
+from ._models_py3 import FeaturestoreEntityVersion
+from ._models_py3 import FeaturestoreEntityVersionProperties
+from ._models_py3 import FeaturestoreEntityVersionResourceArmPaginatedResult
from ._models_py3 import FeaturizationSettings
+from ._models_py3 import FileSystemSource
+from ._models_py3 import FixedInputData
from ._models_py3 import FlavorData
from ._models_py3 import ForecastHorizon
from ._models_py3 import Forecasting
from ._models_py3 import ForecastingSettings
from ._models_py3 import ForecastingTrainingSettings
+from ._models_py3 import FqdnOutboundRule
+from ._models_py3 import GenerationSafetyQualityMetricThreshold
+from ._models_py3 import GenerationSafetyQualityMonitoringSignal
+from ._models_py3 import GenerationTokenUsageMetricThreshold
+from ._models_py3 import GenerationTokenUsageSignal
from ._models_py3 import GridSamplingAlgorithm
from ._models_py3 import HDInsight
from ._models_py3 import HDInsightProperties
from ._models_py3 import HDInsightSchema
+from ._models_py3 import HdfsDatastore
from ._models_py3 import IdAssetReference
from ._models_py3 import IdentityConfiguration
from ._models_py3 import IdentityForCmk
@@ -201,9 +275,13 @@
from ._models_py3 import ImageObjectDetectionBase
from ._models_py3 import ImageSweepSettings
from ._models_py3 import ImageVertical
+from ._models_py3 import ImportDataAction
+from ._models_py3 import IndexColumn
from ._models_py3 import InferenceContainerProperties
+from ._models_py3 import InferencingServer
from ._models_py3 import InstanceTypeSchema
from ._models_py3 import InstanceTypeSchemaResources
+from ._models_py3 import IntellectualProperty
from ._models_py3 import JobBase
from ._models_py3 import JobBaseProperties
from ._models_py3 import JobBaseResourceArmPaginatedResult
@@ -213,10 +291,27 @@
from ._models_py3 import JobResourceConfiguration
from ._models_py3 import JobScheduleAction
from ._models_py3 import JobService
+from ._models_py3 import KerberosCredentials
+from ._models_py3 import KerberosKeytabCredentials
+from ._models_py3 import KerberosKeytabSecrets
+from ._models_py3 import KerberosPasswordCredentials
+from ._models_py3 import KerberosPasswordSecrets
+from ._models_py3 import KeyVaultProperties
from ._models_py3 import Kubernetes
from ._models_py3 import KubernetesOnlineDeployment
from ._models_py3 import KubernetesProperties
from ._models_py3 import KubernetesSchema
+from ._models_py3 import LabelCategory
+from ._models_py3 import LabelClass
+from ._models_py3 import LabelingDataConfiguration
+from ._models_py3 import LabelingJob
+from ._models_py3 import LabelingJobImageProperties
+from ._models_py3 import LabelingJobInstructions
+from ._models_py3 import LabelingJobMediaProperties
+from ._models_py3 import LabelingJobProperties
+from ._models_py3 import LabelingJobResourceArmPaginatedResult
+from ._models_py3 import LabelingJobTextProperties
+from ._models_py3 import LakeHouseArtifact
from ._models_py3 import ListAmlUserFeatureResult
from ._models_py3 import ListNotebookKeysResult
from ._models_py3 import ListStorageAccountKeysResult
@@ -224,24 +319,53 @@
from ._models_py3 import ListWorkspaceKeysResult
from ._models_py3 import ListWorkspaceQuotas
from ._models_py3 import LiteralJobInput
+from ._models_py3 import MLAssistConfiguration
+from ._models_py3 import MLAssistConfigurationDisabled
+from ._models_py3 import MLAssistConfigurationEnabled
from ._models_py3 import MLFlowModelJobInput
from ._models_py3 import MLFlowModelJobOutput
from ._models_py3 import MLTableData
from ._models_py3 import MLTableJobInput
from ._models_py3 import MLTableJobOutput
+from ._models_py3 import ManagedComputeIdentity
from ._models_py3 import ManagedIdentity
from ._models_py3 import ManagedIdentityAuthTypeWorkspaceConnectionProperties
+from ._models_py3 import ManagedNetworkProvisionOptions
+from ._models_py3 import ManagedNetworkProvisionStatus
+from ._models_py3 import ManagedNetworkSettings
from ._models_py3 import ManagedOnlineDeployment
from ._models_py3 import ManagedServiceIdentity
+from ._models_py3 import MaterializationComputeResource
+from ._models_py3 import MaterializationSettings
from ._models_py3 import MedianStoppingPolicy
+from ._models_py3 import ModelConfiguration
from ._models_py3 import ModelContainer
from ._models_py3 import ModelContainerProperties
from ._models_py3 import ModelContainerResourceArmPaginatedResult
+from ._models_py3 import ModelPackageInput
+from ._models_py3 import ModelPerformanceMetricThresholdBase
+from ._models_py3 import ModelPerformanceSignal
from ._models_py3 import ModelVersion
from ._models_py3 import ModelVersionProperties
from ._models_py3 import ModelVersionResourceArmPaginatedResult
+from ._models_py3 import MonitorComputeConfigurationBase
+from ._models_py3 import MonitorComputeIdentityBase
+from ._models_py3 import MonitorDefinition
+from ._models_py3 import MonitorEmailNotificationSettings
+from ._models_py3 import MonitorNotificationSettings
+from ._models_py3 import MonitorServerlessSparkCompute
+from ._models_py3 import MonitoringDataSegment
+from ._models_py3 import MonitoringFeatureFilterBase
+from ._models_py3 import MonitoringInputDataBase
+from ._models_py3 import MonitoringSignalBase
+from ._models_py3 import MonitoringTarget
+from ._models_py3 import MonitoringThreshold
+from ._models_py3 import MonitoringWorkspaceConnection
from ._models_py3 import Mpi
from ._models_py3 import NCrossValidations
+from ._models_py3 import NlpFixedParameters
+from ._models_py3 import NlpParameterSubspace
+from ._models_py3 import NlpSweepSettings
from ._models_py3 import NlpVertical
from ._models_py3 import NlpVerticalFeaturizationSettings
from ._models_py3 import NlpVerticalLimitSettings
@@ -252,24 +376,48 @@
from ._models_py3 import NotebookAccessTokenResult
from ._models_py3 import NotebookPreparationError
from ._models_py3 import NotebookResourceInfo
+from ._models_py3 import NotificationSetting
+from ._models_py3 import NumericalDataDriftMetricThreshold
+from ._models_py3 import NumericalDataQualityMetricThreshold
+from ._models_py3 import NumericalPredictionDriftMetricThreshold
from ._models_py3 import Objective
+from ._models_py3 import OneLakeArtifact
+from ._models_py3 import OneLakeDatastore
from ._models_py3 import OnlineDeployment
from ._models_py3 import OnlineDeploymentProperties
from ._models_py3 import OnlineDeploymentTrackedResourceArmPaginatedResult
from ._models_py3 import OnlineEndpoint
from ._models_py3 import OnlineEndpointProperties
from ._models_py3 import OnlineEndpointTrackedResourceArmPaginatedResult
+from ._models_py3 import OnlineInferenceConfiguration
from ._models_py3 import OnlineRequestSettings
from ._models_py3 import OnlineScaleSettings
+from ._models_py3 import Operation
+from ._models_py3 import OperationDisplay
+from ._models_py3 import OperationListResult
+from ._models_py3 import OsPatchingStatus
+from ._models_py3 import OutboundRule
+from ._models_py3 import OutboundRuleBasicResource
+from ._models_py3 import OutboundRuleListResult
from ._models_py3 import OutputPathAssetReference
from ._models_py3 import PATAuthTypeWorkspaceConnectionProperties
+from ._models_py3 import PackageInputPathBase
+from ._models_py3 import PackageInputPathId
+from ._models_py3 import PackageInputPathUrl
+from ._models_py3 import PackageInputPathVersion
+from ._models_py3 import PackageRequest
+from ._models_py3 import PackageResponse
from ._models_py3 import PaginatedComputeResourcesList
from ._models_py3 import PartialBatchDeployment
from ._models_py3 import PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties
+from ._models_py3 import PartialJobBase
+from ._models_py3 import PartialJobBasePartialResource
from ._models_py3 import PartialManagedServiceIdentity
from ._models_py3 import PartialMinimalTrackedResource
from ._models_py3 import PartialMinimalTrackedResourceWithIdentity
from ._models_py3 import PartialMinimalTrackedResourceWithSku
+from ._models_py3 import PartialMinimalTrackedResourceWithSkuAndIdentity
+from ._models_py3 import PartialNotificationSetting
from ._models_py3 import PartialRegistryPartialTrackedResource
from ._models_py3 import PartialSku
from ._models_py3 import Password
@@ -278,18 +426,26 @@
from ._models_py3 import PendingUploadResponseDto
from ._models_py3 import PersonalComputeInstanceSettings
from ._models_py3 import PipelineJob
+from ._models_py3 import PredictionDriftMetricThresholdBase
+from ._models_py3 import PredictionDriftMonitoringSignal
from ._models_py3 import PrivateEndpoint
from ._models_py3 import PrivateEndpointConnection
from ._models_py3 import PrivateEndpointConnectionListResult
+from ._models_py3 import PrivateEndpointDestination
+from ._models_py3 import PrivateEndpointOutboundRule
from ._models_py3 import PrivateEndpointResource
from ._models_py3 import PrivateLinkResource
from ._models_py3 import PrivateLinkResourceListResult
from ._models_py3 import PrivateLinkServiceConnectionState
from ._models_py3 import ProbeSettings
+from ._models_py3 import ProgressMetrics
+from ._models_py3 import ProxyResource
from ._models_py3 import PyTorch
+from ._models_py3 import QueueSettings
from ._models_py3 import QuotaBaseProperties
from ._models_py3 import QuotaUpdateParameters
from ._models_py3 import RandomSamplingAlgorithm
+from ._models_py3 import Ray
from ._models_py3 import Recurrence
from ._models_py3 import RecurrenceSchedule
from ._models_py3 import RecurrenceTrigger
@@ -302,13 +458,17 @@
from ._models_py3 import RegistryRegionArmDetails
from ._models_py3 import RegistryTrackedResourceArmPaginatedResult
from ._models_py3 import Regression
+from ._models_py3 import RegressionModelPerformanceMetricThreshold
from ._models_py3 import RegressionTrainingSettings
+from ._models_py3 import RequestLogging
+from ._models_py3 import ResizeSchema
from ._models_py3 import Resource
from ._models_py3 import ResourceBase
from ._models_py3 import ResourceConfiguration
from ._models_py3 import ResourceId
from ._models_py3 import ResourceName
from ._models_py3 import ResourceQuota
+from ._models_py3 import RollingInputData
from ._models_py3 import Route
from ._models_py3 import SASAuthTypeWorkspaceConnectionProperties
from ._models_py3 import SASCredentialDto
@@ -325,9 +485,20 @@
from ._models_py3 import ScriptReference
from ._models_py3 import ScriptsToExecute
from ._models_py3 import Seasonality
+from ._models_py3 import SecretConfiguration
+from ._models_py3 import ServerlessEndpoint
+from ._models_py3 import ServerlessEndpointCapacityReservation
+from ._models_py3 import ServerlessEndpointProperties
+from ._models_py3 import ServerlessEndpointStatus
+from ._models_py3 import ServerlessEndpointTrackedResourceArmPaginatedResult
+from ._models_py3 import ServerlessInferenceEndpoint
+from ._models_py3 import ServerlessOffer
from ._models_py3 import ServiceManagedResourcesSettings
+from ._models_py3 import ServicePrincipalAuthTypeWorkspaceConnectionProperties
from ._models_py3 import ServicePrincipalDatastoreCredentials
from ._models_py3 import ServicePrincipalDatastoreSecrets
+from ._models_py3 import ServiceTagDestination
+from ._models_py3 import ServiceTagOutboundRule
from ._models_py3 import SetupScripts
from ._models_py3 import SharedPrivateLinkResource
from ._models_py3 import Sku
@@ -335,8 +506,15 @@
from ._models_py3 import SkuResource
from ._models_py3 import SkuResourceArmPaginatedResult
from ._models_py3 import SkuSetting
+from ._models_py3 import SparkJob
+from ._models_py3 import SparkJobEntry
+from ._models_py3 import SparkJobPythonEntry
+from ._models_py3 import SparkJobScalaEntry
+from ._models_py3 import SparkResourceConfiguration
from ._models_py3 import SslConfiguration
from ._models_py3 import StackEnsembleSettings
+from ._models_py3 import StaticInputData
+from ._models_py3 import StatusMessage
from ._models_py3 import StorageAccountDetails
from ._models_py3 import SweepJob
from ._models_py3 import SweepJobLimits
@@ -346,6 +524,9 @@
from ._models_py3 import SystemCreatedStorageAccount
from ._models_py3 import SystemData
from ._models_py3 import SystemService
+from ._models_py3 import TableFixedParameters
+from ._models_py3 import TableParameterSubspace
+from ._models_py3 import TableSweepSettings
from ._models_py3 import TableVertical
from ._models_py3 import TableVerticalFeaturizationSettings
from ._models_py3 import TableVerticalLimitSettings
@@ -357,10 +538,12 @@
from ._models_py3 import TextClassificationMultilabel
from ._models_py3 import TextNer
from ._models_py3 import TmpfsOptions
+from ._models_py3 import TopNFeaturesByAttribution
from ._models_py3 import TrackedResource
from ._models_py3 import TrainingSettings
from ._models_py3 import TrialComponent
from ._models_py3 import TriggerBase
+from ._models_py3 import TritonInferencingServer
from ._models_py3 import TritonModelJobInput
from ._models_py3 import TritonModelJobOutput
from ._models_py3 import TruncationSelectionPolicy
@@ -391,27 +574,42 @@
from ._models_py3 import VirtualMachineSshCredentials
from ._models_py3 import VolumeDefinition
from ._models_py3 import VolumeOptions
+from ._models_py3 import Webhook
from ._models_py3 import Workspace
+from ._models_py3 import WorkspaceConnectionAccessKey
+from ._models_py3 import WorkspaceConnectionApiKey
from ._models_py3 import WorkspaceConnectionManagedIdentity
from ._models_py3 import WorkspaceConnectionPersonalAccessToken
from ._models_py3 import WorkspaceConnectionPropertiesV2
from ._models_py3 import WorkspaceConnectionPropertiesV2BasicResource
from ._models_py3 import WorkspaceConnectionPropertiesV2BasicResourceArmPaginatedResult
+from ._models_py3 import WorkspaceConnectionServicePrincipal
from ._models_py3 import WorkspaceConnectionSharedAccessSignature
+from ._models_py3 import WorkspaceConnectionUpdateParameter
from ._models_py3 import WorkspaceConnectionUsernamePassword
+from ._models_py3 import WorkspaceHubConfig
from ._models_py3 import WorkspaceListResult
+from ._models_py3 import WorkspacePrivateEndpointResource
from ._models_py3 import WorkspaceUpdateParameters
+from ._machine_learning_services_mgmt_client_enums import ActionType
from ._machine_learning_services_mgmt_client_enums import AllocationState
from ._machine_learning_services_mgmt_client_enums import ApplicationSharingPolicy
from ._machine_learning_services_mgmt_client_enums import AssetProvisioningState
+from ._machine_learning_services_mgmt_client_enums import AutoDeleteCondition
from ._machine_learning_services_mgmt_client_enums import AutoRebuildSetting
from ._machine_learning_services_mgmt_client_enums import Autosave
+from ._machine_learning_services_mgmt_client_enums import BaseEnvironmentSourceType
+from ._machine_learning_services_mgmt_client_enums import BatchDeploymentConfigurationType
from ._machine_learning_services_mgmt_client_enums import BatchLoggingLevel
from ._machine_learning_services_mgmt_client_enums import BatchOutputAction
from ._machine_learning_services_mgmt_client_enums import BillingCurrency
from ._machine_learning_services_mgmt_client_enums import BlockedTransformers
from ._machine_learning_services_mgmt_client_enums import Caching
+from ._machine_learning_services_mgmt_client_enums import CategoricalDataDriftMetric
+from ._machine_learning_services_mgmt_client_enums import CategoricalDataQualityMetric
+from ._machine_learning_services_mgmt_client_enums import CategoricalPredictionDriftMetric
+from ._machine_learning_services_mgmt_client_enums import ClassificationModelPerformanceMetric
from ._machine_learning_services_mgmt_client_enums import ClassificationModels
from ._machine_learning_services_mgmt_client_enums import ClassificationMultilabelPrimaryMetrics
from ._machine_learning_services_mgmt_client_enums import ClassificationPrimaryMetrics
@@ -419,12 +617,18 @@
from ._machine_learning_services_mgmt_client_enums import ComputeInstanceAuthorizationType
from ._machine_learning_services_mgmt_client_enums import ComputeInstanceState
from ._machine_learning_services_mgmt_client_enums import ComputePowerAction
+from ._machine_learning_services_mgmt_client_enums import ComputeRecurrenceFrequency
+from ._machine_learning_services_mgmt_client_enums import ComputeTriggerType
from ._machine_learning_services_mgmt_client_enums import ComputeType
+from ._machine_learning_services_mgmt_client_enums import ComputeWeekDay
from ._machine_learning_services_mgmt_client_enums import ConnectionAuthType
from ._machine_learning_services_mgmt_client_enums import ConnectionCategory
from ._machine_learning_services_mgmt_client_enums import ContainerType
from ._machine_learning_services_mgmt_client_enums import CreatedByType
from ._machine_learning_services_mgmt_client_enums import CredentialsType
+from ._machine_learning_services_mgmt_client_enums import DataAvailabilityStatus
+from ._machine_learning_services_mgmt_client_enums import DataCollectionMode
+from ._machine_learning_services_mgmt_client_enums import DataImportSourceType
from ._machine_learning_services_mgmt_client_enums import DataType
from ._machine_learning_services_mgmt_client_enums import DatastoreType
from ._machine_learning_services_mgmt_client_enums import DeploymentProvisioningState
@@ -432,6 +636,7 @@
from ._machine_learning_services_mgmt_client_enums import DistributionType
from ._machine_learning_services_mgmt_client_enums import EarlyTerminationPolicyType
from ._machine_learning_services_mgmt_client_enums import EgressPublicNetworkAccessType
+from ._machine_learning_services_mgmt_client_enums import EmailNotificationEnableType
from ._machine_learning_services_mgmt_client_enums import EncryptionStatus
from ._machine_learning_services_mgmt_client_enums import EndpointAuthMode
from ._machine_learning_services_mgmt_client_enums import EndpointComputeType
@@ -439,58 +644,104 @@
from ._machine_learning_services_mgmt_client_enums import EndpointServiceConnectionStatus
from ._machine_learning_services_mgmt_client_enums import EnvironmentType
from ._machine_learning_services_mgmt_client_enums import EnvironmentVariableType
+from ._machine_learning_services_mgmt_client_enums import ExportFormatType
+from ._machine_learning_services_mgmt_client_enums import FeatureAttributionMetric
+from ._machine_learning_services_mgmt_client_enums import FeatureDataType
+from ._machine_learning_services_mgmt_client_enums import FeatureImportanceMode
from ._machine_learning_services_mgmt_client_enums import FeatureLags
from ._machine_learning_services_mgmt_client_enums import FeaturizationMode
from ._machine_learning_services_mgmt_client_enums import ForecastHorizonMode
from ._machine_learning_services_mgmt_client_enums import ForecastingModels
from ._machine_learning_services_mgmt_client_enums import ForecastingPrimaryMetrics
+from ._machine_learning_services_mgmt_client_enums import GenerationSafetyQualityMetric
+from ._machine_learning_services_mgmt_client_enums import GenerationTokenUsageMetric
from ._machine_learning_services_mgmt_client_enums import Goal
from ._machine_learning_services_mgmt_client_enums import IdentityConfigurationType
+from ._machine_learning_services_mgmt_client_enums import ImageAnnotationType
from ._machine_learning_services_mgmt_client_enums import ImageType
+from ._machine_learning_services_mgmt_client_enums import IncrementalDataRefresh
+from ._machine_learning_services_mgmt_client_enums import InferencingServerType
from ._machine_learning_services_mgmt_client_enums import InputDeliveryMode
+from ._machine_learning_services_mgmt_client_enums import InputPathType
from ._machine_learning_services_mgmt_client_enums import InstanceSegmentationPrimaryMetrics
+from ._machine_learning_services_mgmt_client_enums import IsolationMode
from ._machine_learning_services_mgmt_client_enums import JobInputType
from ._machine_learning_services_mgmt_client_enums import JobLimitsType
from ._machine_learning_services_mgmt_client_enums import JobOutputType
+from ._machine_learning_services_mgmt_client_enums import JobProvisioningState
from ._machine_learning_services_mgmt_client_enums import JobStatus
+from ._machine_learning_services_mgmt_client_enums import JobTier
from ._machine_learning_services_mgmt_client_enums import JobType
from ._machine_learning_services_mgmt_client_enums import KeyType
from ._machine_learning_services_mgmt_client_enums import LearningRateScheduler
from ._machine_learning_services_mgmt_client_enums import ListViewType
from ._machine_learning_services_mgmt_client_enums import LoadBalancerType
+from ._machine_learning_services_mgmt_client_enums import LogTrainingMetrics
+from ._machine_learning_services_mgmt_client_enums import LogValidationLoss
from ._machine_learning_services_mgmt_client_enums import LogVerbosity
+from ._machine_learning_services_mgmt_client_enums import MLAssistConfigurationType
+from ._machine_learning_services_mgmt_client_enums import MLFlowAutologgerState
+from ._machine_learning_services_mgmt_client_enums import ManagedNetworkStatus
from ._machine_learning_services_mgmt_client_enums import ManagedServiceIdentityType
+from ._machine_learning_services_mgmt_client_enums import MaterializationStoreType
+from ._machine_learning_services_mgmt_client_enums import MediaType
+from ._machine_learning_services_mgmt_client_enums import MlflowAutologger
from ._machine_learning_services_mgmt_client_enums import ModelSize
+from ._machine_learning_services_mgmt_client_enums import ModelTaskType
+from ._machine_learning_services_mgmt_client_enums import MonitorComputeIdentityType
+from ._machine_learning_services_mgmt_client_enums import MonitorComputeType
+from ._machine_learning_services_mgmt_client_enums import MonitoringFeatureDataType
+from ._machine_learning_services_mgmt_client_enums import MonitoringFeatureFilterType
+from ._machine_learning_services_mgmt_client_enums import MonitoringInputDataType
+from ._machine_learning_services_mgmt_client_enums import MonitoringModelType
+from ._machine_learning_services_mgmt_client_enums import MonitoringNotificationType
+from ._machine_learning_services_mgmt_client_enums import MonitoringSignalType
from ._machine_learning_services_mgmt_client_enums import MountAction
from ._machine_learning_services_mgmt_client_enums import MountState
+from ._machine_learning_services_mgmt_client_enums import MultiSelect
from ._machine_learning_services_mgmt_client_enums import NCrossValidationsMode
from ._machine_learning_services_mgmt_client_enums import Network
+from ._machine_learning_services_mgmt_client_enums import NlpLearningRateScheduler
from ._machine_learning_services_mgmt_client_enums import NodeState
from ._machine_learning_services_mgmt_client_enums import NodesValueType
+from ._machine_learning_services_mgmt_client_enums import NumericalDataDriftMetric
+from ._machine_learning_services_mgmt_client_enums import NumericalDataQualityMetric
+from ._machine_learning_services_mgmt_client_enums import NumericalPredictionDriftMetric
from ._machine_learning_services_mgmt_client_enums import ObjectDetectionPrimaryMetrics
+from ._machine_learning_services_mgmt_client_enums import OneLakeArtifactType
from ._machine_learning_services_mgmt_client_enums import OperatingSystemType
from ._machine_learning_services_mgmt_client_enums import OperationName
from ._machine_learning_services_mgmt_client_enums import OperationStatus
from ._machine_learning_services_mgmt_client_enums import OperationTrigger
from ._machine_learning_services_mgmt_client_enums import OrderString
+from ._machine_learning_services_mgmt_client_enums import Origin
from ._machine_learning_services_mgmt_client_enums import OsType
from ._machine_learning_services_mgmt_client_enums import OutputDeliveryMode
+from ._machine_learning_services_mgmt_client_enums import PackageBuildState
+from ._machine_learning_services_mgmt_client_enums import PackageInputDeliveryMode
+from ._machine_learning_services_mgmt_client_enums import PackageInputType
+from ._machine_learning_services_mgmt_client_enums import PatchStatus
from ._machine_learning_services_mgmt_client_enums import PendingUploadCredentialType
from ._machine_learning_services_mgmt_client_enums import PendingUploadType
from ._machine_learning_services_mgmt_client_enums import PrivateEndpointConnectionProvisioningState
-from ._machine_learning_services_mgmt_client_enums import PrivateEndpointServiceConnectionStatus
+from ._machine_learning_services_mgmt_client_enums import ProtectionLevel
from ._machine_learning_services_mgmt_client_enums import Protocol
from ._machine_learning_services_mgmt_client_enums import ProvisioningState
from ._machine_learning_services_mgmt_client_enums import ProvisioningStatus
-from ._machine_learning_services_mgmt_client_enums import PublicNetworkAccess
from ._machine_learning_services_mgmt_client_enums import PublicNetworkAccessType
from ._machine_learning_services_mgmt_client_enums import QuotaUnit
from ._machine_learning_services_mgmt_client_enums import RandomSamplingAlgorithmRule
from ._machine_learning_services_mgmt_client_enums import RecurrenceFrequency
from ._machine_learning_services_mgmt_client_enums import ReferenceType
+from ._machine_learning_services_mgmt_client_enums import RegressionModelPerformanceMetric
from ._machine_learning_services_mgmt_client_enums import RegressionModels
from ._machine_learning_services_mgmt_client_enums import RegressionPrimaryMetrics
from ._machine_learning_services_mgmt_client_enums import RemoteLoginPortPublicAccess
+from ._machine_learning_services_mgmt_client_enums import RollingRateType
+from ._machine_learning_services_mgmt_client_enums import RuleAction
+from ._machine_learning_services_mgmt_client_enums import RuleCategory
+from ._machine_learning_services_mgmt_client_enums import RuleStatus
+from ._machine_learning_services_mgmt_client_enums import RuleType
from ._machine_learning_services_mgmt_client_enums import SamplingAlgorithmType
from ._machine_learning_services_mgmt_client_enums import ScaleType
from ._machine_learning_services_mgmt_client_enums import ScheduleActionType
@@ -500,21 +751,26 @@
from ._machine_learning_services_mgmt_client_enums import ScheduleStatus
from ._machine_learning_services_mgmt_client_enums import SeasonalityMode
from ._machine_learning_services_mgmt_client_enums import SecretsType
+from ._machine_learning_services_mgmt_client_enums import ServerlessInferenceEndpointAuthMode
from ._machine_learning_services_mgmt_client_enums import ServiceDataAccessAuthIdentity
from ._machine_learning_services_mgmt_client_enums import ShortSeriesHandlingConfiguration
from ._machine_learning_services_mgmt_client_enums import SkuScaleType
from ._machine_learning_services_mgmt_client_enums import SkuTier
from ._machine_learning_services_mgmt_client_enums import SourceType
+from ._machine_learning_services_mgmt_client_enums import SparkJobEntryType
from ._machine_learning_services_mgmt_client_enums import SshPublicAccess
from ._machine_learning_services_mgmt_client_enums import SslConfigStatus
from ._machine_learning_services_mgmt_client_enums import StackMetaLearnerType
from ._machine_learning_services_mgmt_client_enums import Status
+from ._machine_learning_services_mgmt_client_enums import StatusMessageLevel
from ._machine_learning_services_mgmt_client_enums import StochasticOptimizer
from ._machine_learning_services_mgmt_client_enums import StorageAccountType
from ._machine_learning_services_mgmt_client_enums import TargetAggregationFunction
from ._machine_learning_services_mgmt_client_enums import TargetLagsMode
from ._machine_learning_services_mgmt_client_enums import TargetRollingWindowSizeMode
from ._machine_learning_services_mgmt_client_enums import TaskType
+from ._machine_learning_services_mgmt_client_enums import TextAnnotationType
+from ._machine_learning_services_mgmt_client_enums import TrainingMode
from ._machine_learning_services_mgmt_client_enums import TriggerType
from ._machine_learning_services_mgmt_client_enums import UnderlyingResourceAction
from ._machine_learning_services_mgmt_client_enums import UnitOfMeasure
@@ -523,9 +779,9 @@
from ._machine_learning_services_mgmt_client_enums import VMPriceOSType
from ._machine_learning_services_mgmt_client_enums import VMTier
from ._machine_learning_services_mgmt_client_enums import ValidationMetricType
-from ._machine_learning_services_mgmt_client_enums import ValueFormat
from ._machine_learning_services_mgmt_client_enums import VmPriority
from ._machine_learning_services_mgmt_client_enums import VolumeDefinitionType
+from ._machine_learning_services_mgmt_client_enums import WebhookType
from ._machine_learning_services_mgmt_client_enums import WeekDay
from ._patch import __all__ as _patch_all
from ._patch import * # pylint: disable=unused-wildcard-import
@@ -535,23 +791,24 @@
"AKS",
"AKSSchema",
"AKSSchemaProperties",
+ "AccessKeyAuthTypeWorkspaceConnectionProperties",
"AccountKeyDatastoreCredentials",
"AccountKeyDatastoreSecrets",
"AcrDetails",
"AksComputeSecrets",
"AksComputeSecretsProperties",
"AksNetworkingConfiguration",
+ "AllFeatures",
"AllNodes",
"AmlCompute",
"AmlComputeNodeInformation",
"AmlComputeNodesInformation",
"AmlComputeProperties",
"AmlComputeSchema",
- "AmlOperation",
- "AmlOperationDisplay",
- "AmlOperationListResult",
"AmlToken",
+ "AmlTokenComputeIdentity",
"AmlUserFeature",
+ "ApiKeyAuthWorkspaceConnectionProperties",
"ArmResourceId",
"AssetBase",
"AssetContainer",
@@ -559,6 +816,7 @@
"AssetJobOutput",
"AssetReferenceBase",
"AssignedUser",
+ "AutoDeleteSetting",
"AutoForecastHorizon",
"AutoMLJob",
"AutoMLVertical",
@@ -568,28 +826,45 @@
"AutoSeasonality",
"AutoTargetLags",
"AutoTargetRollingWindowSize",
+ "AutologgerSettings",
"AzureBlobDatastore",
"AzureDataLakeGen1Datastore",
"AzureDataLakeGen2Datastore",
+ "AzureDatastore",
+ "AzureDevOpsWebhook",
"AzureFileDatastore",
+ "AzureMLBatchInferencingServer",
+ "AzureMLOnlineInferencingServer",
"BanditPolicy",
+ "BaseEnvironmentId",
+ "BaseEnvironmentSource",
"BatchDeployment",
+ "BatchDeploymentConfiguration",
"BatchDeploymentProperties",
"BatchDeploymentTrackedResourceArmPaginatedResult",
"BatchEndpoint",
"BatchEndpointDefaults",
"BatchEndpointProperties",
"BatchEndpointTrackedResourceArmPaginatedResult",
+ "BatchPipelineComponentDeploymentConfiguration",
"BatchRetrySettings",
"BayesianSamplingAlgorithm",
"BindOptions",
"BlobReferenceForConsumptionDto",
"BuildContext",
+ "CapacityReservationGroup",
+ "CapacityReservationGroupProperties",
+ "CapacityReservationGroupTrackedResourceArmPaginatedResult",
+ "CategoricalDataDriftMetricThreshold",
+ "CategoricalDataQualityMetricThreshold",
+ "CategoricalPredictionDriftMetricThreshold",
"CertificateDatastoreCredentials",
"CertificateDatastoreSecrets",
"Classification",
+ "ClassificationModelPerformanceMetricThreshold",
"ClassificationTrainingSettings",
"ClusterUpdateParameters",
+ "CocoExportSummary",
"CodeConfiguration",
"CodeContainer",
"CodeContainerProperties",
@@ -597,9 +872,11 @@
"CodeVersion",
"CodeVersionProperties",
"CodeVersionResourceArmPaginatedResult",
+ "Collection",
"ColumnTransformer",
"CommandJob",
"CommandJobLimits",
+ "ComponentConfiguration",
"ComponentContainer",
"ComponentContainerProperties",
"ComponentContainerResourceArmPaginatedResult",
@@ -609,6 +886,7 @@
"Compute",
"ComputeInstance",
"ComputeInstanceApplication",
+ "ComputeInstanceAutologgerSettings",
"ComputeInstanceConnectivityEndpoints",
"ComputeInstanceContainer",
"ComputeInstanceCreatedBy",
@@ -620,40 +898,58 @@
"ComputeInstanceSchema",
"ComputeInstanceSshSettings",
"ComputeInstanceVersion",
+ "ComputeRecurrenceSchedule",
"ComputeResource",
"ComputeResourceSchema",
+ "ComputeRuntimeDto",
"ComputeSchedules",
"ComputeSecrets",
"ComputeStartStopSchedule",
"ContainerResourceRequirements",
"ContainerResourceSettings",
"CosmosDbSettings",
+ "CreateMonitorAction",
"Cron",
"CronTrigger",
+ "CsvExportSummary",
"CustomForecastHorizon",
+ "CustomInferencingServer",
+ "CustomKeys",
+ "CustomKeysWorkspaceConnectionProperties",
+ "CustomMetricThreshold",
"CustomModelJobInput",
"CustomModelJobOutput",
+ "CustomMonitoringSignal",
"CustomNCrossValidations",
"CustomSeasonality",
"CustomService",
"CustomTargetLags",
"CustomTargetRollingWindowSize",
+ "DataCollector",
"DataContainer",
"DataContainerProperties",
"DataContainerResourceArmPaginatedResult",
+ "DataDriftMetricThresholdBase",
+ "DataDriftMonitoringSignal",
"DataFactory",
+ "DataImport",
+ "DataImportSource",
"DataLakeAnalytics",
"DataLakeAnalyticsSchema",
"DataLakeAnalyticsSchemaProperties",
"DataPathAssetReference",
+ "DataQualityMetricThresholdBase",
+ "DataQualityMonitoringSignal",
"DataVersionBase",
"DataVersionBaseProperties",
"DataVersionBaseResourceArmPaginatedResult",
+ "DatabaseSource",
"Databricks",
"DatabricksComputeSecrets",
"DatabricksComputeSecretsProperties",
"DatabricksProperties",
"DatabricksSchema",
+ "DatasetExportSummary",
"Datastore",
"DatastoreCredentials",
"DatastoreProperties",
@@ -671,8 +967,9 @@
"DistributionConfiguration",
"Docker",
"EarlyTerminationPolicy",
- "EncryptionKeyVaultProperties",
+ "EncryptionKeyVaultUpdateProperties",
"EncryptionProperty",
+ "EncryptionUpdateProperties",
"Endpoint",
"EndpointAuthKeys",
"EndpointAuthToken",
@@ -691,21 +988,54 @@
"ErrorResponse",
"EstimatedVMPrice",
"EstimatedVMPrices",
+ "ExportSummary",
"ExternalFQDNResponse",
"FQDNEndpoint",
"FQDNEndpointDetail",
"FQDNEndpoints",
- "FQDNEndpointsProperties",
+ "FQDNEndpointsPropertyBag",
+ "Feature",
+ "FeatureAttributionDriftMonitoringSignal",
+ "FeatureAttributionMetricThreshold",
+ "FeatureImportanceSettings",
+ "FeatureProperties",
+ "FeatureResourceArmPaginatedResult",
+ "FeatureStoreSettings",
+ "FeatureSubset",
+ "FeatureWindow",
+ "FeaturesetContainer",
+ "FeaturesetContainerProperties",
+ "FeaturesetContainerResourceArmPaginatedResult",
+ "FeaturesetSpecification",
+ "FeaturesetVersion",
+ "FeaturesetVersionBackfillRequest",
+ "FeaturesetVersionBackfillResponse",
+ "FeaturesetVersionProperties",
+ "FeaturesetVersionResourceArmPaginatedResult",
+ "FeaturestoreEntityContainer",
+ "FeaturestoreEntityContainerProperties",
+ "FeaturestoreEntityContainerResourceArmPaginatedResult",
+ "FeaturestoreEntityVersion",
+ "FeaturestoreEntityVersionProperties",
+ "FeaturestoreEntityVersionResourceArmPaginatedResult",
"FeaturizationSettings",
+ "FileSystemSource",
+ "FixedInputData",
"FlavorData",
"ForecastHorizon",
"Forecasting",
"ForecastingSettings",
"ForecastingTrainingSettings",
+ "FqdnOutboundRule",
+ "GenerationSafetyQualityMetricThreshold",
+ "GenerationSafetyQualityMonitoringSignal",
+ "GenerationTokenUsageMetricThreshold",
+ "GenerationTokenUsageSignal",
"GridSamplingAlgorithm",
"HDInsight",
"HDInsightProperties",
"HDInsightSchema",
+ "HdfsDatastore",
"IdAssetReference",
"IdentityConfiguration",
"IdentityForCmk",
@@ -727,9 +1057,13 @@
"ImageObjectDetectionBase",
"ImageSweepSettings",
"ImageVertical",
+ "ImportDataAction",
+ "IndexColumn",
"InferenceContainerProperties",
+ "InferencingServer",
"InstanceTypeSchema",
"InstanceTypeSchemaResources",
+ "IntellectualProperty",
"JobBase",
"JobBaseProperties",
"JobBaseResourceArmPaginatedResult",
@@ -739,10 +1073,27 @@
"JobResourceConfiguration",
"JobScheduleAction",
"JobService",
+ "KerberosCredentials",
+ "KerberosKeytabCredentials",
+ "KerberosKeytabSecrets",
+ "KerberosPasswordCredentials",
+ "KerberosPasswordSecrets",
+ "KeyVaultProperties",
"Kubernetes",
"KubernetesOnlineDeployment",
"KubernetesProperties",
"KubernetesSchema",
+ "LabelCategory",
+ "LabelClass",
+ "LabelingDataConfiguration",
+ "LabelingJob",
+ "LabelingJobImageProperties",
+ "LabelingJobInstructions",
+ "LabelingJobMediaProperties",
+ "LabelingJobProperties",
+ "LabelingJobResourceArmPaginatedResult",
+ "LabelingJobTextProperties",
+ "LakeHouseArtifact",
"ListAmlUserFeatureResult",
"ListNotebookKeysResult",
"ListStorageAccountKeysResult",
@@ -750,24 +1101,53 @@
"ListWorkspaceKeysResult",
"ListWorkspaceQuotas",
"LiteralJobInput",
+ "MLAssistConfiguration",
+ "MLAssistConfigurationDisabled",
+ "MLAssistConfigurationEnabled",
"MLFlowModelJobInput",
"MLFlowModelJobOutput",
"MLTableData",
"MLTableJobInput",
"MLTableJobOutput",
+ "ManagedComputeIdentity",
"ManagedIdentity",
"ManagedIdentityAuthTypeWorkspaceConnectionProperties",
+ "ManagedNetworkProvisionOptions",
+ "ManagedNetworkProvisionStatus",
+ "ManagedNetworkSettings",
"ManagedOnlineDeployment",
"ManagedServiceIdentity",
+ "MaterializationComputeResource",
+ "MaterializationSettings",
"MedianStoppingPolicy",
+ "ModelConfiguration",
"ModelContainer",
"ModelContainerProperties",
"ModelContainerResourceArmPaginatedResult",
+ "ModelPackageInput",
+ "ModelPerformanceMetricThresholdBase",
+ "ModelPerformanceSignal",
"ModelVersion",
"ModelVersionProperties",
"ModelVersionResourceArmPaginatedResult",
+ "MonitorComputeConfigurationBase",
+ "MonitorComputeIdentityBase",
+ "MonitorDefinition",
+ "MonitorEmailNotificationSettings",
+ "MonitorNotificationSettings",
+ "MonitorServerlessSparkCompute",
+ "MonitoringDataSegment",
+ "MonitoringFeatureFilterBase",
+ "MonitoringInputDataBase",
+ "MonitoringSignalBase",
+ "MonitoringTarget",
+ "MonitoringThreshold",
+ "MonitoringWorkspaceConnection",
"Mpi",
"NCrossValidations",
+ "NlpFixedParameters",
+ "NlpParameterSubspace",
+ "NlpSweepSettings",
"NlpVertical",
"NlpVerticalFeaturizationSettings",
"NlpVerticalLimitSettings",
@@ -778,24 +1158,48 @@
"NotebookAccessTokenResult",
"NotebookPreparationError",
"NotebookResourceInfo",
+ "NotificationSetting",
+ "NumericalDataDriftMetricThreshold",
+ "NumericalDataQualityMetricThreshold",
+ "NumericalPredictionDriftMetricThreshold",
"Objective",
+ "OneLakeArtifact",
+ "OneLakeDatastore",
"OnlineDeployment",
"OnlineDeploymentProperties",
"OnlineDeploymentTrackedResourceArmPaginatedResult",
"OnlineEndpoint",
"OnlineEndpointProperties",
"OnlineEndpointTrackedResourceArmPaginatedResult",
+ "OnlineInferenceConfiguration",
"OnlineRequestSettings",
"OnlineScaleSettings",
+ "Operation",
+ "OperationDisplay",
+ "OperationListResult",
+ "OsPatchingStatus",
+ "OutboundRule",
+ "OutboundRuleBasicResource",
+ "OutboundRuleListResult",
"OutputPathAssetReference",
"PATAuthTypeWorkspaceConnectionProperties",
+ "PackageInputPathBase",
+ "PackageInputPathId",
+ "PackageInputPathUrl",
+ "PackageInputPathVersion",
+ "PackageRequest",
+ "PackageResponse",
"PaginatedComputeResourcesList",
"PartialBatchDeployment",
"PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties",
+ "PartialJobBase",
+ "PartialJobBasePartialResource",
"PartialManagedServiceIdentity",
"PartialMinimalTrackedResource",
"PartialMinimalTrackedResourceWithIdentity",
"PartialMinimalTrackedResourceWithSku",
+ "PartialMinimalTrackedResourceWithSkuAndIdentity",
+ "PartialNotificationSetting",
"PartialRegistryPartialTrackedResource",
"PartialSku",
"Password",
@@ -804,18 +1208,26 @@
"PendingUploadResponseDto",
"PersonalComputeInstanceSettings",
"PipelineJob",
+ "PredictionDriftMetricThresholdBase",
+ "PredictionDriftMonitoringSignal",
"PrivateEndpoint",
"PrivateEndpointConnection",
"PrivateEndpointConnectionListResult",
+ "PrivateEndpointDestination",
+ "PrivateEndpointOutboundRule",
"PrivateEndpointResource",
"PrivateLinkResource",
"PrivateLinkResourceListResult",
"PrivateLinkServiceConnectionState",
"ProbeSettings",
+ "ProgressMetrics",
+ "ProxyResource",
"PyTorch",
+ "QueueSettings",
"QuotaBaseProperties",
"QuotaUpdateParameters",
"RandomSamplingAlgorithm",
+ "Ray",
"Recurrence",
"RecurrenceSchedule",
"RecurrenceTrigger",
@@ -828,13 +1240,17 @@
"RegistryRegionArmDetails",
"RegistryTrackedResourceArmPaginatedResult",
"Regression",
+ "RegressionModelPerformanceMetricThreshold",
"RegressionTrainingSettings",
+ "RequestLogging",
+ "ResizeSchema",
"Resource",
"ResourceBase",
"ResourceConfiguration",
"ResourceId",
"ResourceName",
"ResourceQuota",
+ "RollingInputData",
"Route",
"SASAuthTypeWorkspaceConnectionProperties",
"SASCredentialDto",
@@ -851,9 +1267,20 @@
"ScriptReference",
"ScriptsToExecute",
"Seasonality",
+ "SecretConfiguration",
+ "ServerlessEndpoint",
+ "ServerlessEndpointCapacityReservation",
+ "ServerlessEndpointProperties",
+ "ServerlessEndpointStatus",
+ "ServerlessEndpointTrackedResourceArmPaginatedResult",
+ "ServerlessInferenceEndpoint",
+ "ServerlessOffer",
"ServiceManagedResourcesSettings",
+ "ServicePrincipalAuthTypeWorkspaceConnectionProperties",
"ServicePrincipalDatastoreCredentials",
"ServicePrincipalDatastoreSecrets",
+ "ServiceTagDestination",
+ "ServiceTagOutboundRule",
"SetupScripts",
"SharedPrivateLinkResource",
"Sku",
@@ -861,8 +1288,15 @@
"SkuResource",
"SkuResourceArmPaginatedResult",
"SkuSetting",
+ "SparkJob",
+ "SparkJobEntry",
+ "SparkJobPythonEntry",
+ "SparkJobScalaEntry",
+ "SparkResourceConfiguration",
"SslConfiguration",
"StackEnsembleSettings",
+ "StaticInputData",
+ "StatusMessage",
"StorageAccountDetails",
"SweepJob",
"SweepJobLimits",
@@ -872,6 +1306,9 @@
"SystemCreatedStorageAccount",
"SystemData",
"SystemService",
+ "TableFixedParameters",
+ "TableParameterSubspace",
+ "TableSweepSettings",
"TableVertical",
"TableVerticalFeaturizationSettings",
"TableVerticalLimitSettings",
@@ -883,10 +1320,12 @@
"TextClassificationMultilabel",
"TextNer",
"TmpfsOptions",
+ "TopNFeaturesByAttribution",
"TrackedResource",
"TrainingSettings",
"TrialComponent",
"TriggerBase",
+ "TritonInferencingServer",
"TritonModelJobInput",
"TritonModelJobOutput",
"TruncationSelectionPolicy",
@@ -917,26 +1356,41 @@
"VirtualMachineSshCredentials",
"VolumeDefinition",
"VolumeOptions",
+ "Webhook",
"Workspace",
+ "WorkspaceConnectionAccessKey",
+ "WorkspaceConnectionApiKey",
"WorkspaceConnectionManagedIdentity",
"WorkspaceConnectionPersonalAccessToken",
"WorkspaceConnectionPropertiesV2",
"WorkspaceConnectionPropertiesV2BasicResource",
"WorkspaceConnectionPropertiesV2BasicResourceArmPaginatedResult",
+ "WorkspaceConnectionServicePrincipal",
"WorkspaceConnectionSharedAccessSignature",
+ "WorkspaceConnectionUpdateParameter",
"WorkspaceConnectionUsernamePassword",
+ "WorkspaceHubConfig",
"WorkspaceListResult",
+ "WorkspacePrivateEndpointResource",
"WorkspaceUpdateParameters",
+ "ActionType",
"AllocationState",
"ApplicationSharingPolicy",
"AssetProvisioningState",
+ "AutoDeleteCondition",
"AutoRebuildSetting",
"Autosave",
+ "BaseEnvironmentSourceType",
+ "BatchDeploymentConfigurationType",
"BatchLoggingLevel",
"BatchOutputAction",
"BillingCurrency",
"BlockedTransformers",
"Caching",
+ "CategoricalDataDriftMetric",
+ "CategoricalDataQualityMetric",
+ "CategoricalPredictionDriftMetric",
+ "ClassificationModelPerformanceMetric",
"ClassificationModels",
"ClassificationMultilabelPrimaryMetrics",
"ClassificationPrimaryMetrics",
@@ -944,12 +1398,18 @@
"ComputeInstanceAuthorizationType",
"ComputeInstanceState",
"ComputePowerAction",
+ "ComputeRecurrenceFrequency",
+ "ComputeTriggerType",
"ComputeType",
+ "ComputeWeekDay",
"ConnectionAuthType",
"ConnectionCategory",
"ContainerType",
"CreatedByType",
"CredentialsType",
+ "DataAvailabilityStatus",
+ "DataCollectionMode",
+ "DataImportSourceType",
"DataType",
"DatastoreType",
"DeploymentProvisioningState",
@@ -957,6 +1417,7 @@
"DistributionType",
"EarlyTerminationPolicyType",
"EgressPublicNetworkAccessType",
+ "EmailNotificationEnableType",
"EncryptionStatus",
"EndpointAuthMode",
"EndpointComputeType",
@@ -964,58 +1425,104 @@
"EndpointServiceConnectionStatus",
"EnvironmentType",
"EnvironmentVariableType",
+ "ExportFormatType",
+ "FeatureAttributionMetric",
+ "FeatureDataType",
+ "FeatureImportanceMode",
"FeatureLags",
"FeaturizationMode",
"ForecastHorizonMode",
"ForecastingModels",
"ForecastingPrimaryMetrics",
+ "GenerationSafetyQualityMetric",
+ "GenerationTokenUsageMetric",
"Goal",
"IdentityConfigurationType",
+ "ImageAnnotationType",
"ImageType",
+ "IncrementalDataRefresh",
+ "InferencingServerType",
"InputDeliveryMode",
+ "InputPathType",
"InstanceSegmentationPrimaryMetrics",
+ "IsolationMode",
"JobInputType",
"JobLimitsType",
"JobOutputType",
+ "JobProvisioningState",
"JobStatus",
+ "JobTier",
"JobType",
"KeyType",
"LearningRateScheduler",
"ListViewType",
"LoadBalancerType",
+ "LogTrainingMetrics",
+ "LogValidationLoss",
"LogVerbosity",
+ "MLAssistConfigurationType",
+ "MLFlowAutologgerState",
+ "ManagedNetworkStatus",
"ManagedServiceIdentityType",
+ "MaterializationStoreType",
+ "MediaType",
+ "MlflowAutologger",
"ModelSize",
+ "ModelTaskType",
+ "MonitorComputeIdentityType",
+ "MonitorComputeType",
+ "MonitoringFeatureDataType",
+ "MonitoringFeatureFilterType",
+ "MonitoringInputDataType",
+ "MonitoringModelType",
+ "MonitoringNotificationType",
+ "MonitoringSignalType",
"MountAction",
"MountState",
+ "MultiSelect",
"NCrossValidationsMode",
"Network",
+ "NlpLearningRateScheduler",
"NodeState",
"NodesValueType",
+ "NumericalDataDriftMetric",
+ "NumericalDataQualityMetric",
+ "NumericalPredictionDriftMetric",
"ObjectDetectionPrimaryMetrics",
+ "OneLakeArtifactType",
"OperatingSystemType",
"OperationName",
"OperationStatus",
"OperationTrigger",
"OrderString",
+ "Origin",
"OsType",
"OutputDeliveryMode",
+ "PackageBuildState",
+ "PackageInputDeliveryMode",
+ "PackageInputType",
+ "PatchStatus",
"PendingUploadCredentialType",
"PendingUploadType",
"PrivateEndpointConnectionProvisioningState",
- "PrivateEndpointServiceConnectionStatus",
+ "ProtectionLevel",
"Protocol",
"ProvisioningState",
"ProvisioningStatus",
- "PublicNetworkAccess",
"PublicNetworkAccessType",
"QuotaUnit",
"RandomSamplingAlgorithmRule",
"RecurrenceFrequency",
"ReferenceType",
+ "RegressionModelPerformanceMetric",
"RegressionModels",
"RegressionPrimaryMetrics",
"RemoteLoginPortPublicAccess",
+ "RollingRateType",
+ "RuleAction",
+ "RuleCategory",
+ "RuleStatus",
+ "RuleType",
"SamplingAlgorithmType",
"ScaleType",
"ScheduleActionType",
@@ -1025,21 +1532,26 @@
"ScheduleStatus",
"SeasonalityMode",
"SecretsType",
+ "ServerlessInferenceEndpointAuthMode",
"ServiceDataAccessAuthIdentity",
"ShortSeriesHandlingConfiguration",
"SkuScaleType",
"SkuTier",
"SourceType",
+ "SparkJobEntryType",
"SshPublicAccess",
"SslConfigStatus",
"StackMetaLearnerType",
"Status",
+ "StatusMessageLevel",
"StochasticOptimizer",
"StorageAccountType",
"TargetAggregationFunction",
"TargetLagsMode",
"TargetRollingWindowSizeMode",
"TaskType",
+ "TextAnnotationType",
+ "TrainingMode",
"TriggerType",
"UnderlyingResourceAction",
"UnitOfMeasure",
@@ -1048,9 +1560,9 @@
"VMPriceOSType",
"VMTier",
"ValidationMetricType",
- "ValueFormat",
"VmPriority",
"VolumeDefinitionType",
+ "WebhookType",
"WeekDay",
]
__all__.extend([p for p in _patch_all if p not in __all__])
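As a quick reference for the re-generated surface, here is a minimal usage sketch of two of the newly exported enums (a hedged illustration, assuming the azure.mgmt.machinelearningservices.models import path this package uses; it is not part of the generated change itself):

    from azure.mgmt.machinelearningservices.models import IsolationMode, MonitoringSignalType

    # Members derive from (str, Enum) with CaseInsensitiveEnumMeta, so they compare
    # equal to their wire-format string values...
    assert IsolationMode.ALLOW_ONLY_APPROVED_OUTBOUND == "AllowOnlyApprovedOutbound"
    # ...and member lookup by name is case-insensitive.
    assert MonitoringSignalType["data_drift"] is MonitoringSignalType.DATA_DRIFT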
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/_machine_learning_services_mgmt_client_enums.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/_machine_learning_services_mgmt_client_enums.py
index a32b5b391440..58680c3e0b8e 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/_machine_learning_services_mgmt_client_enums.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/_machine_learning_services_mgmt_client_enums.py
@@ -10,6 +10,12 @@
from azure.core import CaseInsensitiveEnumMeta
+class ActionType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Enum. Indicates the action type. "Internal" refers to actions that are for internal only APIs."""
+
+ INTERNAL = "Internal"
+
+
class AllocationState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Allocation state of the compute. Possible values are: steady - Indicates that the compute is
not resizing. There are no changes to the number of compute nodes in the compute in progress. A
@@ -43,6 +49,13 @@ class AssetProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
DELETING = "Deleting"
+class AutoDeleteCondition(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """AutoDeleteCondition."""
+
+ CREATED_GREATER_THAN = "CreatedGreaterThan"
+ LAST_ACCESSED_GREATER_THAN = "LastAccessedGreaterThan"
+
+
class AutoRebuildSetting(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""AutoRebuild setting for the derived image."""
@@ -58,6 +71,19 @@ class Autosave(str, Enum, metaclass=CaseInsensitiveEnumMeta):
REMOTE = "Remote"
+class BaseEnvironmentSourceType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Base environment type."""
+
+ ENVIRONMENT_ASSET = "EnvironmentAsset"
+
+
+class BatchDeploymentConfigurationType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The enumerated property types for batch deployments."""
+
+ MODEL = "Model"
+ PIPELINE_COMPONENT = "PipelineComponent"
+
+
class BatchLoggingLevel(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Log verbosity for batch inferencing.
Increasing verbosity order for logging is : Warning, Info and Debug.
@@ -119,6 +145,50 @@ class Caching(str, Enum, metaclass=CaseInsensitiveEnumMeta):
READ_WRITE = "ReadWrite"
+class CategoricalDataDriftMetric(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """CategoricalDataDriftMetric."""
+
+ JENSEN_SHANNON_DISTANCE = "JensenShannonDistance"
+ """The Jensen Shannon Distance (JSD) metric."""
+ POPULATION_STABILITY_INDEX = "PopulationStabilityIndex"
+ """The Population Stability Index (PSI) metric."""
+ PEARSONS_CHI_SQUARED_TEST = "PearsonsChiSquaredTest"
+ """The Pearsons Chi Squared Test metric."""
+
+
+class CategoricalDataQualityMetric(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """CategoricalDataQualityMetric."""
+
+ NULL_VALUE_RATE = "NullValueRate"
+ """Calculates the rate of null values."""
+ DATA_TYPE_ERROR_RATE = "DataTypeErrorRate"
+ """Calculates the rate of data type errors."""
+ OUT_OF_BOUNDS_RATE = "OutOfBoundsRate"
+ """Calculates the rate values are out of bounds."""
+
+
+class CategoricalPredictionDriftMetric(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """CategoricalPredictionDriftMetric."""
+
+ JENSEN_SHANNON_DISTANCE = "JensenShannonDistance"
+ """The Jensen Shannon Distance (JSD) metric."""
+ POPULATION_STABILITY_INDEX = "PopulationStabilityIndex"
+ """The Population Stability Index (PSI) metric."""
+ PEARSONS_CHI_SQUARED_TEST = "PearsonsChiSquaredTest"
+ """The Pearsons Chi Squared Test metric."""
+
+
+class ClassificationModelPerformanceMetric(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """ClassificationModelPerformanceMetric."""
+
+ ACCURACY = "Accuracy"
+ """Calculates the accuracy of the model predictions."""
+ PRECISION = "Precision"
+ """Calculates the precision of the model predictions."""
+ RECALL = "Recall"
+ """Calculates the recall of the model predictions."""
+
+
class ClassificationModels(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Enum for all classification models supported by AutoML."""
@@ -166,7 +236,7 @@ class ClassificationModels(str, Enum, metaclass=CaseInsensitiveEnumMeta):
#: decision rules inferred from the data features."""
RANDOM_FOREST = "RandomForest"
"""Random forest is a supervised learning algorithm.
- #: The "forest"\ it builds, is an ensemble of decision trees, usually trained with the “bagging”\
+ #: The "forest" it builds, is an ensemble of decision trees, usually trained with the bagging
#: method.
#: The general idea of the bagging method is that a combination of learning models increases the
#: overall result."""
@@ -247,6 +317,7 @@ class ComputeInstanceState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
DELETING = "Deleting"
RUNNING = "Running"
RESTARTING = "Restarting"
+ RESIZING = "Resizing"
JOB_RUNNING = "JobRunning"
SETTING_UP = "SettingUp"
SETUP_FAILED = "SetupFailed"
@@ -260,12 +331,34 @@ class ComputeInstanceState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
class ComputePowerAction(str, Enum, metaclass=CaseInsensitiveEnumMeta):
- """The compute power action."""
+ """[Required] The compute power action."""
START = "Start"
STOP = "Stop"
+class ComputeRecurrenceFrequency(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Enum to describe the frequency of a compute recurrence schedule."""
+
+ MINUTE = "Minute"
+ """Minute frequency"""
+ HOUR = "Hour"
+ """Hour frequency"""
+ DAY = "Day"
+ """Day frequency"""
+ WEEK = "Week"
+ """Week frequency"""
+ MONTH = "Month"
+ """Month frequency"""
+
+
+class ComputeTriggerType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """ComputeTriggerType."""
+
+ RECURRENCE = "Recurrence"
+ CRON = "Cron"
+
+
class ComputeType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The type of compute."""
@@ -281,6 +374,25 @@ class ComputeType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
SYNAPSE_SPARK = "SynapseSpark"
+class ComputeWeekDay(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Enum of weekday."""
+
+ MONDAY = "Monday"
+ """Monday weekday"""
+ TUESDAY = "Tuesday"
+ """Tuesday weekday"""
+ WEDNESDAY = "Wednesday"
+ """Wednesday weekday"""
+ THURSDAY = "Thursday"
+ """Thursday weekday"""
+ FRIDAY = "Friday"
+ """Friday weekday"""
+ SATURDAY = "Saturday"
+ """Saturday weekday"""
+ SUNDAY = "Sunday"
+ """Sunday weekday"""
+
+
class ConnectionAuthType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Authentication type of the connection target."""
@@ -289,6 +401,10 @@ class ConnectionAuthType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
USERNAME_PASSWORD = "UsernamePassword"
NONE = "None"
SAS = "SAS"
+ SERVICE_PRINCIPAL = "ServicePrincipal"
+ ACCESS_KEY = "AccessKey"
+ API_KEY = "ApiKey"
+ CUSTOM_KEYS = "CustomKeys"
class ConnectionCategory(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -297,13 +413,30 @@ class ConnectionCategory(str, Enum, metaclass=CaseInsensitiveEnumMeta):
PYTHON_FEED = "PythonFeed"
CONTAINER_REGISTRY = "ContainerRegistry"
GIT = "Git"
+ S3 = "S3"
+ SNOWFLAKE = "Snowflake"
+ AZURE_SQL_DB = "AzureSqlDb"
+ AZURE_SYNAPSE_ANALYTICS = "AzureSynapseAnalytics"
+ AZURE_MY_SQL_DB = "AzureMySqlDb"
+ AZURE_POSTGRES_DB = "AzurePostgresDb"
+ ADLS_GEN2 = "ADLSGen2"
+ REDIS = "Redis"
+ API_KEY = "ApiKey"
+ AZURE_OPEN_AI = "AzureOpenAI"
+ COGNITIVE_SEARCH = "CognitiveSearch"
+ COGNITIVE_SERVICE = "CognitiveService"
+ CUSTOM_KEYS = "CustomKeys"
class ContainerType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
- """ContainerType."""
+ """The type of container to retrieve logs from."""
STORAGE_INITIALIZER = "StorageInitializer"
+ """The container used to download models and score script."""
INFERENCE_SERVER = "InferenceServer"
+ """The container used to serve user's request."""
+ MODEL_DATA_COLLECTOR = "ModelDataCollector"
+ """The container used to collect payload and custom logging when mdc is enabled."""
class CreatedByType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -323,6 +456,31 @@ class CredentialsType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
NONE = "None"
SAS = "Sas"
SERVICE_PRINCIPAL = "ServicePrincipal"
+ KERBEROS_KEYTAB = "KerberosKeytab"
+ KERBEROS_PASSWORD = "KerberosPassword"
+
+
+class DataAvailabilityStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """DataAvailabilityStatus."""
+
+ NONE = "None"
+ PENDING = "Pending"
+ INCOMPLETE = "Incomplete"
+ COMPLETE = "Complete"
+
+
+class DataCollectionMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """DataCollectionMode."""
+
+ ENABLED = "Enabled"
+ DISABLED = "Disabled"
+
+
+class DataImportSourceType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Enum to determine the type of data."""
+
+ DATABASE = "database"
+ FILE_SYSTEM = "file_system"
class DatastoreType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -332,6 +490,8 @@ class DatastoreType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
AZURE_DATA_LAKE_GEN1 = "AzureDataLakeGen1"
AZURE_DATA_LAKE_GEN2 = "AzureDataLakeGen2"
AZURE_FILE = "AzureFile"
+ HDFS = "Hdfs"
+ ONE_LAKE = "OneLake"
class DataType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -368,6 +528,7 @@ class DistributionType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
PY_TORCH = "PyTorch"
TENSOR_FLOW = "TensorFlow"
MPI = "Mpi"
+ RAY = "Ray"
class EarlyTerminationPolicyType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -387,6 +548,14 @@ class EgressPublicNetworkAccessType(str, Enum, metaclass=CaseInsensitiveEnumMeta
DISABLED = "Disabled"
+class EmailNotificationEnableType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Enum to determine the email notification type."""
+
+ JOB_COMPLETED = "JobCompleted"
+ JOB_FAILED = "JobFailed"
+ JOB_CANCELLED = "JobCancelled"
+
+
class EncryptionStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Indicates whether or not the encryption is enabled for the workspace."""
@@ -428,6 +597,7 @@ class EndpointServiceConnectionStatus(str, Enum, metaclass=CaseInsensitiveEnumMe
PENDING = "Pending"
REJECTED = "Rejected"
DISCONNECTED = "Disconnected"
+ TIMEOUT = "Timeout"
class EnvironmentType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -443,6 +613,43 @@ class EnvironmentVariableType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
LOCAL = "local"
+class ExportFormatType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The format of exported labels."""
+
+ DATASET = "Dataset"
+ COCO = "Coco"
+ CSV = "CSV"
+
+
+class FeatureAttributionMetric(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """FeatureAttributionMetric."""
+
+ NORMALIZED_DISCOUNTED_CUMULATIVE_GAIN = "NormalizedDiscountedCumulativeGain"
+ """The Normalized Discounted Cumulative Gain metric."""
+
+
+class FeatureDataType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """FeatureDataType."""
+
+ STRING = "String"
+ INTEGER = "Integer"
+ LONG = "Long"
+ FLOAT = "Float"
+ DOUBLE = "Double"
+ BINARY = "Binary"
+ DATETIME = "Datetime"
+ BOOLEAN = "Boolean"
+
+
+class FeatureImportanceMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The mode of operation for computing feature importance."""
+
+ DISABLED = "Disabled"
+ """Disables computing feature importance within a signal."""
+ ENABLED = "Enabled"
+ """Enables computing feature importance within a signal."""
+
+
class FeatureLags(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Flag for generating lags for the numeric features."""
@@ -537,7 +744,7 @@ class ForecastingModels(str, Enum, metaclass=CaseInsensitiveEnumMeta):
#: It's an inexact but powerful technique."""
RANDOM_FOREST = "RandomForest"
"""Random forest is a supervised learning algorithm.
- #: The "forest" it builds, is an ensemble of decision trees, usually trained with the “bagging”
+ #: The "forest" it builds, is an ensemble of decision trees, usually trained with the bagging
#: method.
#: The general idea of the bagging method is that a combination of learning models increases the
#: overall result."""
@@ -567,6 +774,28 @@ class ForecastingPrimaryMetrics(str, Enum, metaclass=CaseInsensitiveEnumMeta):
#: Error (MAE) of (time) series with different scales."""
+class GenerationSafetyQualityMetric(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Generation safety quality metric enum."""
+
+ ACCEPTABLE_GROUNDEDNESS_SCORE_PER_INSTANCE = "AcceptableGroundednessScorePerInstance"
+ AGGREGATED_GROUNDEDNESS_PASS_RATE = "AggregatedGroundednessPassRate"
+ ACCEPTABLE_COHERENCE_SCORE_PER_INSTANCE = "AcceptableCoherenceScorePerInstance"
+ AGGREGATED_COHERENCE_PASS_RATE = "AggregatedCoherencePassRate"
+ ACCEPTABLE_FLUENCY_SCORE_PER_INSTANCE = "AcceptableFluencyScorePerInstance"
+ AGGREGATED_FLUENCY_PASS_RATE = "AggregatedFluencyPassRate"
+ ACCEPTABLE_SIMILARITY_SCORE_PER_INSTANCE = "AcceptableSimilarityScorePerInstance"
+ AGGREGATED_SIMILARITY_PASS_RATE = "AggregatedSimilarityPassRate"
+ ACCEPTABLE_RELEVANCE_SCORE_PER_INSTANCE = "AcceptableRelevanceScorePerInstance"
+ AGGREGATED_RELEVANCE_PASS_RATE = "AggregatedRelevancePassRate"
+
+
+class GenerationTokenUsageMetric(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Generation token statistics metric enum."""
+
+ TOTAL_TOKEN_COUNT = "TotalTokenCount"
+ TOTAL_TOKEN_COUNT_PER_GROUP = "TotalTokenCountPerGroup"
+
+
class Goal(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Defines supported metric goals for hyperparameter tuning."""
@@ -582,6 +811,14 @@ class IdentityConfigurationType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
USER_IDENTITY = "UserIdentity"
+class ImageAnnotationType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Annotation type of image data."""
+
+ CLASSIFICATION = "Classification"
+ BOUNDING_BOX = "BoundingBox"
+ INSTANCE_SEGMENTATION = "InstanceSegmentation"
+
+
class ImageType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Type of the image. Possible values are: docker - For docker images. azureml - For AzureML
images.
@@ -591,6 +828,22 @@ class ImageType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
AZUREML = "azureml"
+class IncrementalDataRefresh(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Whether IncrementalDataRefresh is enabled."""
+
+ ENABLED = "Enabled"
+ DISABLED = "Disabled"
+
+
+class InferencingServerType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Inferencing server type for various targets."""
+
+ AZURE_ML_ONLINE = "AzureMLOnline"
+ AZURE_ML_BATCH = "AzureMLBatch"
+ TRITON = "Triton"
+ CUSTOM = "Custom"
+
+
class InputDeliveryMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Enum to determine the input data delivery mode."""
@@ -602,6 +855,14 @@ class InputDeliveryMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
EVAL_DOWNLOAD = "EvalDownload"
+class InputPathType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Input path type for package inputs."""
+
+ URL = "Url"
+ PATH_ID = "PathId"
+ PATH_VERSION = "PathVersion"
+
+
class InstanceSegmentationPrimaryMetrics(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Primary metrics for InstanceSegmentation tasks."""
@@ -610,6 +871,14 @@ class InstanceSegmentationPrimaryMetrics(str, Enum, metaclass=CaseInsensitiveEnu
#: AP is calculated for each class and averaged to get the MAP."""
+class IsolationMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Isolation mode for the managed network of a machine learning workspace."""
+
+ DISABLED = "Disabled"
+ ALLOW_INTERNET_OUTBOUND = "AllowInternetOutbound"
+ ALLOW_ONLY_APPROVED_OUTBOUND = "AllowOnlyApprovedOutbound"
+
+
class JobInputType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Enum to determine the Job Input Type."""
@@ -640,6 +909,15 @@ class JobOutputType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
TRITON_MODEL = "triton_model"
+class JobProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Enum to determine the job provisioning state."""
+
+ SUCCEEDED = "Succeeded"
+ FAILED = "Failed"
+ CANCELED = "Canceled"
+ IN_PROGRESS = "InProgress"
+
+
class JobStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The status of a job."""
@@ -676,6 +954,18 @@ class JobStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The job is paused by users. Some adjustment to labeling jobs can be made only in paused state."""
UNKNOWN = "Unknown"
"""Default job status if not mapped to all other statuses"""
+ SCHEDULED = "Scheduled"
+ """The job is in a scheduled state. Job is not in any active state."""
+
+
+class JobTier(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Enum to determine the job tier."""
+
+ NULL = "Null"
+ SPOT = "Spot"
+ BASIC = "Basic"
+ STANDARD = "Standard"
+ PREMIUM = "Premium"
class JobType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -683,8 +973,10 @@ class JobType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
AUTO_ML = "AutoML"
COMMAND = "Command"
+ LABELING = "Labeling"
SWEEP = "Sweep"
PIPELINE = "Pipeline"
+ SPARK = "Spark"
class KeyType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -720,6 +1012,24 @@ class LoadBalancerType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
INTERNAL_LOAD_BALANCER = "InternalLoadBalancer"
+class LogTrainingMetrics(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """LogTrainingMetrics."""
+
+ ENABLE = "Enable"
+ """Enable compute and log training metrics."""
+ DISABLE = "Disable"
+ """Disable compute and log training metrics."""
+
+
+class LogValidationLoss(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """LogValidationLoss."""
+
+ ENABLE = "Enable"
+ """Enable compute and log validation metrics."""
+ DISABLE = "Disable"
+ """Disable compute and log validation metrics."""
+
+
class LogVerbosity(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Enum for setting log verbosity."""
@@ -737,6 +1047,13 @@ class LogVerbosity(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Only critical statements logged."""
+class ManagedNetworkStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Status for the managed network of a machine learning workspace."""
+
+ INACTIVE = "Inactive"
+ ACTIVE = "Active"
+
+
class ManagedServiceIdentityType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Type of managed service identity (where both SystemAssigned and UserAssigned types are
allowed).
@@ -748,6 +1065,43 @@ class ManagedServiceIdentityType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned,UserAssigned"
+class MaterializationStoreType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """MaterializationStoreType."""
+
+ NONE = "None"
+ ONLINE = "Online"
+ OFFLINE = "Offline"
+ ONLINE_AND_OFFLINE = "OnlineAndOffline"
+
+
+class MediaType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Media type of data asset."""
+
+ IMAGE = "Image"
+ TEXT = "Text"
+
+
+class MLAssistConfigurationType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """MLAssistConfigurationType."""
+
+ ENABLED = "Enabled"
+ DISABLED = "Disabled"
+
+
+class MlflowAutologger(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Indicates whether mlflow autologger is enabled for notebooks."""
+
+ ENABLED = "Enabled"
+ DISABLED = "Disabled"
+
+
+class MLFlowAutologgerState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Enum to determine the state of mlflow autologger."""
+
+ ENABLED = "Enabled"
+ DISABLED = "Disabled"
+
+
class ModelSize(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Image model size."""
@@ -763,6 +1117,104 @@ class ModelSize(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Extra large size."""
+class ModelTaskType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Model task type enum."""
+
+ CLASSIFICATION = "Classification"
+ REGRESSION = "Regression"
+ QUESTION_ANSWERING = "QuestionAnswering"
+
+
+class MonitorComputeIdentityType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Monitor compute identity type enum."""
+
+ AML_TOKEN = "AmlToken"
+ """Authenticates through user's AML token."""
+ MANAGED_IDENTITY = "ManagedIdentity"
+ """Authenticates through a user-provided managed identity."""
+
+
+class MonitorComputeType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Monitor compute type enum."""
+
+ SERVERLESS_SPARK = "ServerlessSpark"
+ """Serverless Spark compute."""
+
+
+class MonitoringFeatureDataType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """MonitoringFeatureDataType."""
+
+ NUMERICAL = "Numerical"
+ """Used for features of numerical data type."""
+ CATEGORICAL = "Categorical"
+ """Used for features of categorical data type."""
+
+
+class MonitoringFeatureFilterType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """MonitoringFeatureFilterType."""
+
+ ALL_FEATURES = "AllFeatures"
+ """Includes all features."""
+ TOP_N_BY_ATTRIBUTION = "TopNByAttribution"
+ """Only includes the top contributing features, measured by feature attribution."""
+ FEATURE_SUBSET = "FeatureSubset"
+ """Includes a user-defined subset of features."""
+
+
+class MonitoringInputDataType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Monitoring input data type enum."""
+
+ STATIC = "Static"
+ """An input data with a fixed window size."""
+ ROLLING = "Rolling"
+ """An input data which rolls relatively to the monitor's current run time."""
+ FIXED = "Fixed"
+ """An input data with tabular format which doesn't require preprocessing."""
+
+
+class MonitoringModelType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """MonitoringModelType."""
+
+ CLASSIFICATION = "Classification"
+ """A model trained for classification tasks."""
+ REGRESSION = "Regression"
+ """A model trained for regressions tasks."""
+
+
+class MonitoringNotificationType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """MonitoringNotificationType."""
+
+ AML_NOTIFICATION = "AmlNotification"
+ """Enables email notifications through AML notifications."""
+ AZURE_MONITOR = "AzureMonitor"
+ """Enables notifications through Azure Monitor by posting metrics to the workspace's Azure Monitor
+ #: instance."""
+
+
+class MonitoringSignalType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """MonitoringSignalType."""
+
+ DATA_DRIFT = "DataDrift"
+ """Tracks model input data distribution change, comparing against training data or past production
+ #: data."""
+ PREDICTION_DRIFT = "PredictionDrift"
+ """Tracks prediction result data distribution change, comparing against validation/test label data
+ #: or past production data."""
+ DATA_QUALITY = "DataQuality"
+ """Tracks model input data integrity."""
+ FEATURE_ATTRIBUTION_DRIFT = "FeatureAttributionDrift"
+ """Tracks feature importance change in production, comparing against feature importance at
+ #: training time."""
+ CUSTOM = "Custom"
+ """Tracks a custom signal provided by users."""
+ MODEL_PERFORMANCE = "ModelPerformance"
+ """Tracks model performance based on ground truth data."""
+ GENERATION_SAFETY_QUALITY = "GenerationSafetyQuality"
+ """Tracks the safety and quality of generated content."""
+ GENERATION_TOKEN_STATISTICS = "GenerationTokenStatistics"
+ """Tracks the token usage of generative endpoints."""
+
+
class MountAction(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Mount Action."""
@@ -781,6 +1233,13 @@ class MountState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
UNMOUNTED = "Unmounted"
+class MultiSelect(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Whether multiSelect is enabled."""
+
+ ENABLED = "Enabled"
+ DISABLED = "Disabled"
+
+
class NCrossValidationsMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Determines how N-Cross validations value is determined."""
@@ -798,6 +1257,25 @@ class Network(str, Enum, metaclass=CaseInsensitiveEnumMeta):
HOST = "Host"
+class NlpLearningRateScheduler(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Enum of learning rate schedulers that aligns with those supported by HF."""
+
+ NONE = "None"
+ """No learning rate schedule."""
+ LINEAR = "Linear"
+ """Linear warmup and decay."""
+ COSINE = "Cosine"
+ """Linear warmup then cosine decay."""
+ COSINE_WITH_RESTARTS = "CosineWithRestarts"
+ """Linear warmup, cosine decay, then restart to initial LR."""
+ POLYNOMIAL = "Polynomial"
+ """Increase linearly then polynomially decay."""
+ CONSTANT = "Constant"
+ """Constant learning rate."""
+ CONSTANT_WITH_WARMUP = "ConstantWithWarmup"
+ """Linear warmup followed by constant value."""
+
+
class NodeState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""State of the compute node. Values are idle, running, preparing, unusable, leaving and
preempted.
@@ -815,6 +1293,44 @@ class NodesValueType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The enumerated types for the nodes value."""
ALL = "All"
+ CUSTOM = "Custom"
+
+
+class NumericalDataDriftMetric(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """NumericalDataDriftMetric."""
+
+ JENSEN_SHANNON_DISTANCE = "JensenShannonDistance"
+ """The Jensen Shannon Distance (JSD) metric."""
+ POPULATION_STABILITY_INDEX = "PopulationStabilityIndex"
+ """The Population Stability Index (PSI) metric."""
+ NORMALIZED_WASSERSTEIN_DISTANCE = "NormalizedWassersteinDistance"
+ """The Normalized Wasserstein Distance metric."""
+ TWO_SAMPLE_KOLMOGOROV_SMIRNOV_TEST = "TwoSampleKolmogorovSmirnovTest"
+ """The Two Sample Kolmogorov-Smirnov Test (two-sample K–S) metric."""
+
+
+class NumericalDataQualityMetric(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """NumericalDataQualityMetric."""
+
+ NULL_VALUE_RATE = "NullValueRate"
+ """Calculates the rate of null values."""
+ DATA_TYPE_ERROR_RATE = "DataTypeErrorRate"
+ """Calculates the rate of data type errors."""
+ OUT_OF_BOUNDS_RATE = "OutOfBoundsRate"
+ """Calculates the rate values are out of bounds."""
+
+
+class NumericalPredictionDriftMetric(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """NumericalPredictionDriftMetric."""
+
+ JENSEN_SHANNON_DISTANCE = "JensenShannonDistance"
+ """The Jensen Shannon Distance (JSD) metric."""
+ POPULATION_STABILITY_INDEX = "PopulationStabilityIndex"
+ """The Population Stability Index (PSI) metric."""
+ NORMALIZED_WASSERSTEIN_DISTANCE = "NormalizedWassersteinDistance"
+ """The Normalized Wasserstein Distance metric."""
+ TWO_SAMPLE_KOLMOGOROV_SMIRNOV_TEST = "TwoSampleKolmogorovSmirnovTest"
+ """The Two Sample Kolmogorov-Smirnov Test (two-sample K–S) metric."""
class ObjectDetectionPrimaryMetrics(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -825,6 +1341,12 @@ class ObjectDetectionPrimaryMetrics(str, Enum, metaclass=CaseInsensitiveEnumMeta
#: AP is calculated for each class and averaged to get the MAP."""
+class OneLakeArtifactType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Enum to determine OneLake artifact type."""
+
+ LAKE_HOUSE = "LakeHouse"
+
+
class OperatingSystemType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The type of operating system."""
@@ -839,6 +1361,7 @@ class OperationName(str, Enum, metaclass=CaseInsensitiveEnumMeta):
START = "Start"
STOP = "Stop"
RESTART = "Restart"
+ RESIZE = "Resize"
REIMAGE = "Reimage"
DELETE = "Delete"
@@ -852,6 +1375,7 @@ class OperationStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
START_FAILED = "StartFailed"
STOP_FAILED = "StopFailed"
RESTART_FAILED = "RestartFailed"
+ RESIZE_FAILED = "ResizeFailed"
REIMAGE_FAILED = "ReimageFailed"
DELETE_FAILED = "DeleteFailed"
@@ -873,6 +1397,16 @@ class OrderString(str, Enum, metaclass=CaseInsensitiveEnumMeta):
UPDATED_AT_ASC = "UpdatedAtAsc"
+class Origin(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The intended executor of the operation; as in Resource Based Access Control (RBAC) and audit
+ logs UX. Default value is "user,system".
+ """
+
+ USER = "user"
+ SYSTEM = "system"
+ USER_SYSTEM = "user,system"
+
+
class OsType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Compute OS Type."""
@@ -885,6 +1419,40 @@ class OutputDeliveryMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
READ_WRITE_MOUNT = "ReadWriteMount"
UPLOAD = "Upload"
+ DIRECT = "Direct"
+
+
+class PackageBuildState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Package build state returned in package response."""
+
+ NOT_STARTED = "NotStarted"
+ RUNNING = "Running"
+ SUCCEEDED = "Succeeded"
+ FAILED = "Failed"
+
+
+class PackageInputDeliveryMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Mounting type of the model or the inputs."""
+
+ COPY = "Copy"
+ DOWNLOAD = "Download"
+
+
+class PackageInputType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of the inputs."""
+
+ URI_FILE = "UriFile"
+ URI_FOLDER = "UriFolder"
+
+
+class PatchStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The os patching status."""
+
+ COMPLETED_WITH_WARNINGS = "CompletedWithWarnings"
+ FAILED = "Failed"
+ IN_PROGRESS = "InProgress"
+ SUCCEEDED = "Succeeded"
+ UNKNOWN = "Unknown"
class PendingUploadCredentialType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -909,14 +1477,13 @@ class PrivateEndpointConnectionProvisioningState(str, Enum, metaclass=CaseInsens
FAILED = "Failed"
-class PrivateEndpointServiceConnectionStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
- """The private endpoint connection status."""
+class ProtectionLevel(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Protection level associated with the Intellectual Property."""
- PENDING = "Pending"
- APPROVED = "Approved"
- REJECTED = "Rejected"
- DISCONNECTED = "Disconnected"
- TIMEOUT = "Timeout"
+ ALL = "All"
+ """All means Intellectual Property is fully protected."""
+ NONE = "None"
+ """None means it is not an Intellectual Property."""
class Protocol(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -928,8 +1495,8 @@ class Protocol(str, Enum, metaclass=CaseInsensitiveEnumMeta):
class ProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
- """The current deployment state of workspace resource. The provisioningState is to indicate states
- for resource provisioning.
+ """The provision state of the cluster. Valid values are Unknown, Updating, Provisioning,
+ Succeeded, and Failed.
"""
UNKNOWN = "Unknown"
@@ -949,13 +1516,6 @@ class ProvisioningStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
FAILED = "Failed"
-class PublicNetworkAccess(str, Enum, metaclass=CaseInsensitiveEnumMeta):
- """Whether requests from Public Network are allowed."""
-
- ENABLED = "Enabled"
- DISABLED = "Disabled"
-
-
class PublicNetworkAccessType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Enum to determine whether PublicNetworkAccess is Enabled or Disabled."""
@@ -999,6 +1559,17 @@ class ReferenceType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
OUTPUT_PATH = "OutputPath"
+class RegressionModelPerformanceMetric(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """RegressionModelPerformanceMetric."""
+
+ MEAN_ABSOLUTE_ERROR = "MeanAbsoluteError"
+ """The Mean Absolute Error (MAE) metric."""
+ ROOT_MEAN_SQUARED_ERROR = "RootMeanSquaredError"
+ """The Root Mean Squared Error (RMSE) metric."""
+ MEAN_SQUARED_ERROR = "MeanSquaredError"
+ """The Mean Squared Error (MSE) metric."""
+
+
class RegressionModels(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Enum for all Regression models supported by AutoML."""
@@ -1029,7 +1600,7 @@ class RegressionModels(str, Enum, metaclass=CaseInsensitiveEnumMeta):
#: It's an inexact but powerful technique."""
RANDOM_FOREST = "RandomForest"
"""Random forest is a supervised learning algorithm.
- #: The "forest"\ it builds, is an ensemble of decision trees, usually trained with the “bagging”\
+ #: The "forest" it builds, is an ensemble of decision trees, usually trained with the bagging
#: method.
#: The general idea of the bagging method is that a combination of learning models increases the
#: overall result."""
@@ -1073,6 +1644,46 @@ class RemoteLoginPortPublicAccess(str, Enum, metaclass=CaseInsensitiveEnumMeta):
NOT_SPECIFIED = "NotSpecified"
+class RollingRateType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """RollingRateType."""
+
+ YEAR = "Year"
+ MONTH = "Month"
+ DAY = "Day"
+ HOUR = "Hour"
+ MINUTE = "Minute"
+
+
+class RuleAction(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The action enum for networking rule."""
+
+ ALLOW = "Allow"
+ DENY = "Deny"
+
+
+class RuleCategory(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Category of a managed network Outbound Rule of a machine learning workspace."""
+
+ REQUIRED = "Required"
+ RECOMMENDED = "Recommended"
+ USER_DEFINED = "UserDefined"
+
+
+class RuleStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Status of a managed network Outbound Rule of a machine learning workspace."""
+
+ INACTIVE = "Inactive"
+ ACTIVE = "Active"
+
+
+class RuleType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of a managed network Outbound Rule of a machine learning workspace."""
+
+ FQDN = "FQDN"
+ PRIVATE_ENDPOINT = "PrivateEndpoint"
+ SERVICE_TAG = "ServiceTag"
+
+
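Since these are str-valued enums built on CaseInsensitiveEnumMeta, callers can pass either a member or its plain string value, and attribute/name lookup ignores case. A minimal sketch of that behavior for the new managed-network rule enums (import path taken from the generated models package):

from azure.mgmt.machinelearningservices.models import RuleAction, RuleType

assert RuleAction.allow is RuleAction.ALLOW   # attribute access is case-insensitive
assert RuleType["fqdn"] is RuleType.FQDN      # name lookup is case-insensitive too
assert RuleAction.ALLOW == "Allow"            # str subclass compares equal to its value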
class SamplingAlgorithmType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""SamplingAlgorithmType."""
@@ -1093,6 +1704,8 @@ class ScheduleActionType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
CREATE_JOB = "CreateJob"
INVOKE_BATCH_ENDPOINT = "InvokeBatchEndpoint"
+ IMPORT_DATA = "ImportData"
+ CREATE_MONITOR = "CreateMonitor"
class ScheduleListViewType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -1145,6 +1758,15 @@ class SecretsType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
CERTIFICATE = "Certificate"
SAS = "Sas"
SERVICE_PRINCIPAL = "ServicePrincipal"
+ KERBEROS_PASSWORD = "KerberosPassword"
+ KERBEROS_KEYTAB = "KerberosKeytab"
+
+
+class ServerlessInferenceEndpointAuthMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """ServerlessInferenceEndpointAuthMode."""
+
+ KEY = "Key"
+ AAD = "AAD"
class ServiceDataAccessAuthIdentity(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -1202,6 +1824,13 @@ class SourceType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
URI = "URI"
+class SparkJobEntryType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """SparkJobEntryType."""
+
+ SPARK_JOB_PYTHON_ENTRY = "SparkJobPythonEntry"
+ SPARK_JOB_SCALA_ENTRY = "SparkJobScalaEntry"
+
+
class SshPublicAccess(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""State of the public SSH port. Possible values are: Disabled - Indicates that the public ssh
port is closed on this instance. Enabled - Indicates that the public ssh port is open and
@@ -1256,6 +1885,14 @@ class Status(str, Enum, metaclass=CaseInsensitiveEnumMeta):
OPERATION_NOT_ENABLED_FOR_REGION = "OperationNotEnabledForRegion"
+class StatusMessageLevel(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """StatusMessageLevel."""
+
+ ERROR = "Error"
+ INFORMATION = "Information"
+ WARNING = "Warning"
+
+
class StochasticOptimizer(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Stochastic optimizer for image models."""
@@ -1349,6 +1986,24 @@ class TaskType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
#: occurrences of entities such as people, locations, organizations, and more."""
+class TextAnnotationType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Annotation type of text data."""
+
+ CLASSIFICATION = "Classification"
+ NAMED_ENTITY_RECOGNITION = "NamedEntityRecognition"
+
+
+class TrainingMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Training mode dictates whether to use distributed training or not."""
+
+ AUTO = "Auto"
+ """Auto mode"""
+ DISTRIBUTED = "Distributed"
+ """Distributed training mode"""
+ NON_DISTRIBUTED = "NonDistributed"
+ """Non distributed training mode"""
+
+
class TriggerType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""TriggerType."""
@@ -1397,12 +2052,6 @@ class ValidationMetricType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""CocoVoc metric."""
-class ValueFormat(str, Enum, metaclass=CaseInsensitiveEnumMeta):
- """format for the workspace connection value."""
-
- JSON = "JSON"
-
-
class VMPriceOSType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Operating system type used by the VM."""
@@ -1434,6 +2083,12 @@ class VolumeDefinitionType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
NPIPE = "npipe"
+class WebhookType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Enum to determine the webhook callback service type."""
+
+ AZURE_DEV_OPS = "AzureDevOps"
+
+
class WeekDay(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Enum of weekday."""
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/_models_py3.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/_models_py3.py
index 736189ee6853..e5ce9debc1c9 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/_models_py3.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/_models_py3.py
@@ -24,17 +24,209 @@
JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object
+class WorkspaceConnectionPropertiesV2(_serialization.Model):
+ """WorkspaceConnectionPropertiesV2.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ AccessKeyAuthTypeWorkspaceConnectionProperties, ApiKeyAuthWorkspaceConnectionProperties,
+ CustomKeysWorkspaceConnectionProperties, ManagedIdentityAuthTypeWorkspaceConnectionProperties,
+ NoneAuthTypeWorkspaceConnectionProperties, PATAuthTypeWorkspaceConnectionProperties,
+ SASAuthTypeWorkspaceConnectionProperties,
+ ServicePrincipalAuthTypeWorkspaceConnectionProperties,
+ UsernamePasswordAuthTypeWorkspaceConnectionProperties
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar auth_type: Authentication type of the connection target. Required. Known values are:
+ "PAT", "ManagedIdentity", "UsernamePassword", "None", "SAS", "ServicePrincipal", "AccessKey",
+ "ApiKey", and "CustomKeys".
+ :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
+ :ivar category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI",
+ "CognitiveSearch", "CognitiveService", and "CustomKeys".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :ivar created_by_workspace_arm_id: The arm id of the workspace which created this connection.
+ :vartype created_by_workspace_arm_id: str
+ :ivar expiry_time:
+ :vartype expiry_time: ~datetime.datetime
+ :ivar is_shared_to_all: Whether this connection will be shared to all the project workspaces
+ under the hub.
+ :vartype is_shared_to_all: bool
+ :ivar metadata: Any object.
+ :vartype metadata: JSON
+ :ivar target:
+ :vartype target: str
+ """
+
+ _validation = {
+ "auth_type": {"required": True},
+ "created_by_workspace_arm_id": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "auth_type": {"key": "authType", "type": "str"},
+ "category": {"key": "category", "type": "str"},
+ "created_by_workspace_arm_id": {"key": "createdByWorkspaceArmId", "type": "str"},
+ "expiry_time": {"key": "expiryTime", "type": "iso-8601"},
+ "is_shared_to_all": {"key": "isSharedToAll", "type": "bool"},
+ "metadata": {"key": "metadata", "type": "object"},
+ "target": {"key": "target", "type": "str"},
+ }
+
+ _subtype_map = {
+ "auth_type": {
+ "AccessKey": "AccessKeyAuthTypeWorkspaceConnectionProperties",
+ "ApiKey": "ApiKeyAuthWorkspaceConnectionProperties",
+ "CustomKeys": "CustomKeysWorkspaceConnectionProperties",
+ "ManagedIdentity": "ManagedIdentityAuthTypeWorkspaceConnectionProperties",
+ "None": "NoneAuthTypeWorkspaceConnectionProperties",
+ "PAT": "PATAuthTypeWorkspaceConnectionProperties",
+ "SAS": "SASAuthTypeWorkspaceConnectionProperties",
+ "ServicePrincipal": "ServicePrincipalAuthTypeWorkspaceConnectionProperties",
+ "UsernamePassword": "UsernamePasswordAuthTypeWorkspaceConnectionProperties",
+ }
+ }
+
+ def __init__(
+ self,
+ *,
+ category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
+ expiry_time: Optional[datetime.datetime] = None,
+ is_shared_to_all: Optional[bool] = None,
+ metadata: Optional[JSON] = None,
+ target: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI",
+ "CognitiveSearch", "CognitiveService", and "CustomKeys".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :keyword expiry_time:
+ :paramtype expiry_time: ~datetime.datetime
+ :keyword is_shared_to_all: Whether this connection will be shared to all the project workspaces
+ under the hub.
+ :paramtype is_shared_to_all: bool
+ :keyword metadata: Any object.
+ :paramtype metadata: JSON
+ :keyword target:
+ :paramtype target: str
+ """
+ super().__init__(**kwargs)
+ self.auth_type: Optional[str] = None
+ self.category = category
+ self.created_by_workspace_arm_id = None
+ self.expiry_time = expiry_time
+ self.is_shared_to_all = is_shared_to_all
+ self.metadata = metadata
+ self.target = target
+
+
+class AccessKeyAuthTypeWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2):
+ """AccessKeyAuthTypeWorkspaceConnectionProperties.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar auth_type: Authentication type of the connection target. Required. Known values are:
+ "PAT", "ManagedIdentity", "UsernamePassword", "None", "SAS", "ServicePrincipal", "AccessKey",
+ "ApiKey", and "CustomKeys".
+ :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
+ :ivar category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI",
+ "CognitiveSearch", "CognitiveService", and "CustomKeys".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :ivar created_by_workspace_arm_id: The arm id of the workspace which created this connection.
+ :vartype created_by_workspace_arm_id: str
+ :ivar expiry_time:
+ :vartype expiry_time: ~datetime.datetime
+ :ivar is_shared_to_all: Whether this connection will be shared to all the project workspaces
+ under the hub.
+ :vartype is_shared_to_all: bool
+ :ivar metadata: Any object.
+ :vartype metadata: JSON
+ :ivar target:
+ :vartype target: str
+ :ivar credentials:
+ :vartype credentials: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionAccessKey
+ """
+
+ _validation = {
+ "auth_type": {"required": True},
+ "created_by_workspace_arm_id": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "auth_type": {"key": "authType", "type": "str"},
+ "category": {"key": "category", "type": "str"},
+ "created_by_workspace_arm_id": {"key": "createdByWorkspaceArmId", "type": "str"},
+ "expiry_time": {"key": "expiryTime", "type": "iso-8601"},
+ "is_shared_to_all": {"key": "isSharedToAll", "type": "bool"},
+ "metadata": {"key": "metadata", "type": "object"},
+ "target": {"key": "target", "type": "str"},
+ "credentials": {"key": "credentials", "type": "WorkspaceConnectionAccessKey"},
+ }
+
+ def __init__(
+ self,
+ *,
+ category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
+ expiry_time: Optional[datetime.datetime] = None,
+ is_shared_to_all: Optional[bool] = None,
+ metadata: Optional[JSON] = None,
+ target: Optional[str] = None,
+ credentials: Optional["_models.WorkspaceConnectionAccessKey"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI",
+ "CognitiveSearch", "CognitiveService", and "CustomKeys".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :keyword expiry_time:
+ :paramtype expiry_time: ~datetime.datetime
+ :keyword is_shared_to_all: Whether this connection will be shared to all the project workspaces
+ under the hub.
+ :paramtype is_shared_to_all: bool
+ :keyword metadata: Any object.
+ :paramtype metadata: JSON
+ :keyword target:
+ :paramtype target: str
+ :keyword credentials:
+ :paramtype credentials: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionAccessKey
+ """
+ super().__init__(
+ category=category,
+ expiry_time=expiry_time,
+ is_shared_to_all=is_shared_to_all,
+ metadata=metadata,
+ target=target,
+ **kwargs
+ )
+ self.auth_type: str = "AccessKey"
+ self.credentials = credentials
+
+
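A short construction sketch for the polymorphic connection properties above: the AccessKey subclass pins the authType discriminator itself, so callers supply only the shared fields plus the matching credentials object. The WorkspaceConnectionAccessKey keyword names below are an assumption, since that model is defined outside this hunk.

from azure.mgmt.machinelearningservices import models as _models

props = _models.AccessKeyAuthTypeWorkspaceConnectionProperties(
    category="S3",
    target="https://my-bucket.s3.amazonaws.com",
    credentials=_models.WorkspaceConnectionAccessKey(   # keyword names assumed
        access_key_id="<key-id>", secret_access_key="<secret>"
    ),
)
assert props.auth_type == "AccessKey"   # set by __init__, not passed by the caller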
class DatastoreCredentials(_serialization.Model):
"""Base definition for datastore credentials.
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
- AccountKeyDatastoreCredentials, CertificateDatastoreCredentials, NoneDatastoreCredentials,
- SasDatastoreCredentials, ServicePrincipalDatastoreCredentials
+ AccountKeyDatastoreCredentials, CertificateDatastoreCredentials, KerberosKeytabCredentials,
+ KerberosPasswordCredentials, NoneDatastoreCredentials, SasDatastoreCredentials,
+ ServicePrincipalDatastoreCredentials
All required parameters must be populated in order to send to Azure.
:ivar credentials_type: [Required] Credential type used to authentication with storage.
- Required. Known values are: "AccountKey", "Certificate", "None", "Sas", and "ServicePrincipal".
+ Required. Known values are: "AccountKey", "Certificate", "None", "Sas", "ServicePrincipal",
+ "KerberosKeytab", and "KerberosPassword".
:vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
"""
@@ -50,6 +242,8 @@ class DatastoreCredentials(_serialization.Model):
"credentials_type": {
"AccountKey": "AccountKeyDatastoreCredentials",
"Certificate": "CertificateDatastoreCredentials",
+ "KerberosKeytab": "KerberosKeytabCredentials",
+ "KerberosPassword": "KerberosPasswordCredentials",
"None": "NoneDatastoreCredentials",
"Sas": "SasDatastoreCredentials",
"ServicePrincipal": "ServicePrincipalDatastoreCredentials",
@@ -68,7 +262,8 @@ class AccountKeyDatastoreCredentials(DatastoreCredentials):
All required parameters must be populated in order to send to Azure.
:ivar credentials_type: [Required] Credential type used to authentication with storage.
- Required. Known values are: "AccountKey", "Certificate", "None", "Sas", and "ServicePrincipal".
+ Required. Known values are: "AccountKey", "Certificate", "None", "Sas", "ServicePrincipal",
+ "KerberosKeytab", and "KerberosPassword".
:vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
:ivar secrets: [Required] Storage account secrets. Required.
:vartype secrets: ~azure.mgmt.machinelearningservices.models.AccountKeyDatastoreSecrets
@@ -98,13 +293,14 @@ class DatastoreSecrets(_serialization.Model):
"""Base definition for datastore secrets.
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
- AccountKeyDatastoreSecrets, CertificateDatastoreSecrets, SasDatastoreSecrets,
- ServicePrincipalDatastoreSecrets
+ AccountKeyDatastoreSecrets, CertificateDatastoreSecrets, KerberosKeytabSecrets,
+ KerberosPasswordSecrets, SasDatastoreSecrets, ServicePrincipalDatastoreSecrets
All required parameters must be populated in order to send to Azure.
:ivar secrets_type: [Required] Credential type used to authentication with storage. Required.
- Known values are: "AccountKey", "Certificate", "Sas", and "ServicePrincipal".
+ Known values are: "AccountKey", "Certificate", "Sas", "ServicePrincipal", "KerberosPassword",
+ and "KerberosKeytab".
:vartype secrets_type: str or ~azure.mgmt.machinelearningservices.models.SecretsType
"""
@@ -120,6 +316,8 @@ class DatastoreSecrets(_serialization.Model):
"secrets_type": {
"AccountKey": "AccountKeyDatastoreSecrets",
"Certificate": "CertificateDatastoreSecrets",
+ "KerberosKeytab": "KerberosKeytabSecrets",
+ "KerberosPassword": "KerberosPasswordSecrets",
"Sas": "SasDatastoreSecrets",
"ServicePrincipal": "ServicePrincipalDatastoreSecrets",
}
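With the Kerberos entries registered in both subtype maps above, deserialization is routed by the discriminator key. A minimal sketch for the credentials side; payload keys beyond the discriminator are omitted here, and the lenient deserializer simply leaves those attributes as None:

from azure.mgmt.machinelearningservices import models as _models

payload = {"credentialsType": "KerberosPassword"}          # discriminator only
creds = _models.DatastoreCredentials.deserialize(payload)  # routed via _subtype_map
assert type(creds).__name__ == "KerberosPasswordCredentials"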
@@ -137,7 +335,8 @@ class AccountKeyDatastoreSecrets(DatastoreSecrets):
All required parameters must be populated in order to send to Azure.
:ivar secrets_type: [Required] Credential type used to authentication with storage. Required.
- Known values are: "AccountKey", "Certificate", "Sas", and "ServicePrincipal".
+ Known values are: "AccountKey", "Certificate", "Sas", "ServicePrincipal", "KerberosPassword",
+ and "KerberosKeytab".
:vartype secrets_type: str or ~azure.mgmt.machinelearningservices.models.SecretsType
:ivar key: Storage account key.
:vartype key: str
@@ -728,6 +927,69 @@ def __init__(
self.load_balancer_subnet = load_balancer_subnet
+class MonitoringFeatureFilterBase(_serialization.Model):
+ """MonitoringFeatureFilterBase.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ AllFeatures, FeatureSubset, TopNFeaturesByAttribution
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar filter_type: [Required] Specifies the feature filter to leverage when selecting features
+ to calculate metrics over. Required. Known values are: "AllFeatures", "TopNByAttribution", and
+ "FeatureSubset".
+ :vartype filter_type: str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringFeatureFilterType
+ """
+
+ _validation = {
+ "filter_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "filter_type": {"key": "filterType", "type": "str"},
+ }
+
+ _subtype_map = {
+ "filter_type": {
+ "AllFeatures": "AllFeatures",
+ "FeatureSubset": "FeatureSubset",
+ "TopNByAttribution": "TopNFeaturesByAttribution",
+ }
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.filter_type: Optional[str] = None
+
+
+class AllFeatures(MonitoringFeatureFilterBase):
+ """AllFeatures.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar filter_type: [Required] Specifies the feature filter to leverage when selecting features
+ to calculate metrics over. Required. Known values are: "AllFeatures", "TopNByAttribution", and
+ "FeatureSubset".
+ :vartype filter_type: str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringFeatureFilterType
+ """
+
+ _validation = {
+ "filter_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "filter_type": {"key": "filterType", "type": "str"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.filter_type: str = "AllFeatures"
+
+
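Note that the "TopNByAttribution" discriminator value maps to the TopNFeaturesByAttribution class, so the wire value and the class name differ. A small construction sketch; the FeatureSubset keyword is assumed, since that class is defined outside this hunk:

from azure.mgmt.machinelearningservices import models as _models

everything = _models.AllFeatures()
subset = _models.FeatureSubset(features=["age", "income"])   # keyword name assumed
assert everything.filter_type == "AllFeatures"
assert subset.filter_type == "FeatureSubset"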
class Nodes(_serialization.Model):
"""Abstract Nodes definition.
@@ -736,7 +998,8 @@ class Nodes(_serialization.Model):
All required parameters must be populated in order to send to Azure.
- :ivar nodes_value_type: [Required] Type of the Nodes value. Required. "All"
+ :ivar nodes_value_type: [Required] Type of the Nodes value. Required. Known values are: "All"
+ and "Custom".
:vartype nodes_value_type: str or ~azure.mgmt.machinelearningservices.models.NodesValueType
"""
@@ -761,7 +1024,8 @@ class AllNodes(Nodes):
All required parameters must be populated in order to send to Azure.
- :ivar nodes_value_type: [Required] Type of the Nodes value. Required. "All"
+ :ivar nodes_value_type: [Required] Type of the Nodes value. Required. Known values are: "All"
+ and "Custom".
:vartype nodes_value_type: str or ~azure.mgmt.machinelearningservices.models.NodesValueType
"""
@@ -1147,112 +1411,6 @@ def __init__(
self.property_bag = property_bag
-class AmlOperation(_serialization.Model):
- """Azure Machine Learning workspace REST API operation.
-
- :ivar name: Operation name: {provider}/{resource}/{operation}.
- :vartype name: str
- :ivar display: Display name of operation.
- :vartype display: ~azure.mgmt.machinelearningservices.models.AmlOperationDisplay
- :ivar is_data_action: Indicates whether the operation applies to data-plane.
- :vartype is_data_action: bool
- """
-
- _attribute_map = {
- "name": {"key": "name", "type": "str"},
- "display": {"key": "display", "type": "AmlOperationDisplay"},
- "is_data_action": {"key": "isDataAction", "type": "bool"},
- }
-
- def __init__(
- self,
- *,
- name: Optional[str] = None,
- display: Optional["_models.AmlOperationDisplay"] = None,
- is_data_action: Optional[bool] = None,
- **kwargs: Any
- ) -> None:
- """
- :keyword name: Operation name: {provider}/{resource}/{operation}.
- :paramtype name: str
- :keyword display: Display name of operation.
- :paramtype display: ~azure.mgmt.machinelearningservices.models.AmlOperationDisplay
- :keyword is_data_action: Indicates whether the operation applies to data-plane.
- :paramtype is_data_action: bool
- """
- super().__init__(**kwargs)
- self.name = name
- self.display = display
- self.is_data_action = is_data_action
-
-
-class AmlOperationDisplay(_serialization.Model):
- """Display name of operation.
-
- :ivar provider: The resource provider name: Microsoft.MachineLearningExperimentation.
- :vartype provider: str
- :ivar resource: The resource on which the operation is performed.
- :vartype resource: str
- :ivar operation: The operation that users can perform.
- :vartype operation: str
- :ivar description: The description for the operation.
- :vartype description: str
- """
-
- _attribute_map = {
- "provider": {"key": "provider", "type": "str"},
- "resource": {"key": "resource", "type": "str"},
- "operation": {"key": "operation", "type": "str"},
- "description": {"key": "description", "type": "str"},
- }
-
- def __init__(
- self,
- *,
- provider: Optional[str] = None,
- resource: Optional[str] = None,
- operation: Optional[str] = None,
- description: Optional[str] = None,
- **kwargs: Any
- ) -> None:
- """
- :keyword provider: The resource provider name: Microsoft.MachineLearningExperimentation.
- :paramtype provider: str
- :keyword resource: The resource on which the operation is performed.
- :paramtype resource: str
- :keyword operation: The operation that users can perform.
- :paramtype operation: str
- :keyword description: The description for the operation.
- :paramtype description: str
- """
- super().__init__(**kwargs)
- self.provider = provider
- self.resource = resource
- self.operation = operation
- self.description = description
-
-
-class AmlOperationListResult(_serialization.Model):
- """An array of operations supported by the resource provider.
-
- :ivar value: List of AML workspace operations supported by the AML workspace resource provider.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.AmlOperation]
- """
-
- _attribute_map = {
- "value": {"key": "value", "type": "[AmlOperation]"},
- }
-
- def __init__(self, *, value: Optional[List["_models.AmlOperation"]] = None, **kwargs: Any) -> None:
- """
- :keyword value: List of AML workspace operations supported by the AML workspace resource
- provider.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.AmlOperation]
- """
- super().__init__(**kwargs)
- self.value = value
-
-
class IdentityConfiguration(_serialization.Model):
"""Base definition for identity configuration.
@@ -1310,6 +1468,63 @@ def __init__(self, **kwargs: Any) -> None:
self.identity_type: str = "AMLToken"
+class MonitorComputeIdentityBase(_serialization.Model):
+ """Monitor compute identity base definition.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ AmlTokenComputeIdentity, ManagedComputeIdentity
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar compute_identity_type: [Required] Monitor compute identity type enum. Required. Known
+ values are: "AmlToken" and "ManagedIdentity".
+ :vartype compute_identity_type: str or
+ ~azure.mgmt.machinelearningservices.models.MonitorComputeIdentityType
+ """
+
+ _validation = {
+ "compute_identity_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "compute_identity_type": {"key": "computeIdentityType", "type": "str"},
+ }
+
+ _subtype_map = {
+ "compute_identity_type": {"AmlToken": "AmlTokenComputeIdentity", "ManagedIdentity": "ManagedComputeIdentity"}
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.compute_identity_type: Optional[str] = None
+
+
+class AmlTokenComputeIdentity(MonitorComputeIdentityBase):
+ """AML token compute identity definition.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar compute_identity_type: [Required] Monitor compute identity type enum. Required. Known
+ values are: "AmlToken" and "ManagedIdentity".
+ :vartype compute_identity_type: str or
+ ~azure.mgmt.machinelearningservices.models.MonitorComputeIdentityType
+ """
+
+ _validation = {
+ "compute_identity_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "compute_identity_type": {"key": "computeIdentityType", "type": "str"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.compute_identity_type: str = "AmlToken"
+
+
class AmlUserFeature(_serialization.Model):
"""Features enabled for a workspace.
@@ -1349,23 +1564,133 @@ def __init__(
self.description = description
-class ArmResourceId(_serialization.Model):
- """ARM ResourceId of a resource.
+class ApiKeyAuthWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2):
+ """This connection type covers the generic ApiKey auth connection categories, for example:
+ AzureOpenAI:
+ Category:= AzureOpenAI
+ AuthType:= ApiKey (as type discriminator)
+ Credentials:= {ApiKey} as
+ Microsoft.MachineLearning.AccountRP.Contracts.WorkspaceConnection.ApiKey
+ Target:= {ApiBase}
- :ivar resource_id: Arm ResourceId is in the format
- "/subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.Storage/storageAccounts/{StorageAccountName}"
- or
- "/subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{AcrName}".
- :vartype resource_id: str
- """
+ CognitiveService:
+ Category:= CognitiveService
+ AuthType:= ApiKey (as type discriminator)
+ Credentials:= {SubscriptionKey} as
+ Microsoft.MachineLearning.AccountRP.Contracts.WorkspaceConnection.ApiKey
+ Target:= ServiceRegion={serviceRegion}
- _attribute_map = {
- "resource_id": {"key": "resourceId", "type": "str"},
- }
+ CognitiveSearch:
+ Category:= CognitiveSearch
+ AuthType:= ApiKey (as type discriminator)
+ Credentials:= {Key} as
+ Microsoft.MachineLearning.AccountRP.Contracts.WorkspaceConnection.ApiKey
+ Target:= {Endpoint}
- def __init__(self, *, resource_id: Optional[str] = None, **kwargs: Any) -> None:
- """
- :keyword resource_id: Arm ResourceId is in the format
+ Use Metadata property bag for ApiType, ApiVersion, Kind and other metadata fields.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar auth_type: Authentication type of the connection target. Required. Known values are:
+ "PAT", "ManagedIdentity", "UsernamePassword", "None", "SAS", "ServicePrincipal", "AccessKey",
+ "ApiKey", and "CustomKeys".
+ :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
+ :ivar category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI",
+ "CognitiveSearch", "CognitiveService", and "CustomKeys".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :ivar created_by_workspace_arm_id: The arm id of the workspace which created this connection.
+ :vartype created_by_workspace_arm_id: str
+ :ivar expiry_time:
+ :vartype expiry_time: ~datetime.datetime
+ :ivar is_shared_to_all: Whether this connection will be shared to all the project workspaces
+ under the hub.
+ :vartype is_shared_to_all: bool
+ :ivar metadata: Any object.
+ :vartype metadata: JSON
+ :ivar target:
+ :vartype target: str
+ :ivar credentials: Api key object for workspace connection credential.
+ :vartype credentials: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionApiKey
+ """
+
+ _validation = {
+ "auth_type": {"required": True},
+ "created_by_workspace_arm_id": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "auth_type": {"key": "authType", "type": "str"},
+ "category": {"key": "category", "type": "str"},
+ "created_by_workspace_arm_id": {"key": "createdByWorkspaceArmId", "type": "str"},
+ "expiry_time": {"key": "expiryTime", "type": "iso-8601"},
+ "is_shared_to_all": {"key": "isSharedToAll", "type": "bool"},
+ "metadata": {"key": "metadata", "type": "object"},
+ "target": {"key": "target", "type": "str"},
+ "credentials": {"key": "credentials", "type": "WorkspaceConnectionApiKey"},
+ }
+
+ def __init__(
+ self,
+ *,
+ category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
+ expiry_time: Optional[datetime.datetime] = None,
+ is_shared_to_all: Optional[bool] = None,
+ metadata: Optional[JSON] = None,
+ target: Optional[str] = None,
+ credentials: Optional["_models.WorkspaceConnectionApiKey"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI",
+ "CognitiveSearch", "CognitiveService", and "CustomKeys".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :keyword expiry_time:
+ :paramtype expiry_time: ~datetime.datetime
+ :keyword is_shared_to_all: Whether this connection will be shared to all the project workspaces
+ under the hub.
+ :paramtype is_shared_to_all: bool
+ :keyword metadata: Any object.
+ :paramtype metadata: JSON
+ :keyword target:
+ :paramtype target: str
+ :keyword credentials: Api key object for workspace connection credential.
+ :paramtype credentials: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionApiKey
+ """
+ super().__init__(
+ category=category,
+ expiry_time=expiry_time,
+ is_shared_to_all=is_shared_to_all,
+ metadata=metadata,
+ target=target,
+ **kwargs
+ )
+ self.auth_type: str = "ApiKey"
+ self.credentials = credentials
+
+
+class ArmResourceId(_serialization.Model):
+ """ARM ResourceId of a resource.
+
+ :ivar resource_id: Arm ResourceId is in the format
+ "/subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.Storage/storageAccounts/{StorageAccountName}"
+ or
+ "/subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{AcrName}".
+ :vartype resource_id: str
+ """
+
+ _attribute_map = {
+ "resource_id": {"key": "resourceId", "type": "str"},
+ }
+
+ def __init__(self, *, resource_id: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword resource_id: Arm ResourceId is in the format
"/subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.Storage/storageAccounts/{StorageAccountName}"
or
"/subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{AcrName}".
@@ -1423,9 +1748,13 @@ class AssetBase(ResourceBase):
:vartype properties: dict[str, str]
:ivar tags: Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
- :ivar is_anonymous: If the name version are system generated (anonymous registration).
+ :ivar auto_delete_setting: Specifies the lifecycle setting of managed data asset.
+ :vartype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :ivar is_anonymous: If the name and version are system generated (anonymous registration). For
+ types where Stage is defined, when Stage is provided it will be used to populate IsAnonymous.
:vartype is_anonymous: bool
- :ivar is_archived: Is the asset archived?.
+ :ivar is_archived: Is the asset archived? For types where Stage is defined, when Stage is
+ provided it will be used to populate IsArchived.
:vartype is_archived: bool
"""
@@ -1433,6 +1762,7 @@ class AssetBase(ResourceBase):
"description": {"key": "description", "type": "str"},
"properties": {"key": "properties", "type": "{str}"},
"tags": {"key": "tags", "type": "{str}"},
+ "auto_delete_setting": {"key": "autoDeleteSetting", "type": "AutoDeleteSetting"},
"is_anonymous": {"key": "isAnonymous", "type": "bool"},
"is_archived": {"key": "isArchived", "type": "bool"},
}
@@ -1443,6 +1773,7 @@ def __init__(
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
+ auto_delete_setting: Optional["_models.AutoDeleteSetting"] = None,
is_anonymous: bool = False,
is_archived: bool = False,
**kwargs: Any
@@ -1454,12 +1785,17 @@ def __init__(
:paramtype properties: dict[str, str]
:keyword tags: Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
- :keyword is_anonymous: If the name version are system generated (anonymous registration).
+ :keyword auto_delete_setting: Specifies the lifecycle setting of managed data asset.
+ :paramtype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :keyword is_anonymous: If the name and version are system generated (anonymous registration). For
+ types where Stage is defined, when Stage is provided it will be used to populate IsAnonymous.
:paramtype is_anonymous: bool
- :keyword is_archived: Is the asset archived?.
+ :keyword is_archived: Is the asset archived? For types where Stage is defined, when Stage is
+ provided it will be used to populate IsArchived.
:paramtype is_archived: bool
"""
super().__init__(description=description, properties=properties, tags=tags, **kwargs)
+ self.auto_delete_setting = auto_delete_setting
self.is_anonymous = is_anonymous
self.is_archived = is_archived
@@ -1561,13 +1897,23 @@ def __init__(
class AssetJobOutput(_serialization.Model):
"""Asset output type.
- :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
+ :ivar asset_name: Output Asset Name.
+ :vartype asset_name: str
+ :ivar asset_version: Output Asset Version.
+ :vartype asset_version: str
+ :ivar auto_delete_setting: Auto delete setting of output data asset.
+ :vartype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:ivar uri: Output Asset URI.
:vartype uri: str
"""
_attribute_map = {
+ "asset_name": {"key": "assetName", "type": "str"},
+ "asset_version": {"key": "assetVersion", "type": "str"},
+ "auto_delete_setting": {"key": "autoDeleteSetting", "type": "AutoDeleteSetting"},
"mode": {"key": "mode", "type": "str"},
"uri": {"key": "uri", "type": "str"},
}
@@ -1575,17 +1921,30 @@ class AssetJobOutput(_serialization.Model):
def __init__(
self,
*,
+ asset_name: Optional[str] = None,
+ asset_version: Optional[str] = None,
+ auto_delete_setting: Optional["_models.AutoDeleteSetting"] = None,
mode: Optional[Union[str, "_models.OutputDeliveryMode"]] = None,
uri: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
+ :keyword asset_name: Output Asset Name.
+ :paramtype asset_name: str
+ :keyword asset_version: Output Asset Version.
+ :paramtype asset_version: str
+ :keyword auto_delete_setting: Auto delete setting of output data asset.
+ :paramtype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
:paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:keyword uri: Output Asset URI.
:paramtype uri: str
"""
super().__init__(**kwargs)
+ self.asset_name = asset_name
+ self.asset_version = asset_version
+ self.auto_delete_setting = auto_delete_setting
self.mode = mode
self.uri = uri
@@ -1658,6 +2017,40 @@ def __init__(self, *, object_id: str, tenant_id: str, **kwargs: Any) -> None:
self.tenant_id = tenant_id
+class AutoDeleteSetting(_serialization.Model):
+ """AutoDeleteSetting.
+
+ :ivar condition: When to check if an asset is expired. Known values are: "CreatedGreaterThan"
+ and "LastAccessedGreaterThan".
+ :vartype condition: str or ~azure.mgmt.machinelearningservices.models.AutoDeleteCondition
+ :ivar value: Expiration condition value.
+ :vartype value: str
+ """
+
+ _attribute_map = {
+ "condition": {"key": "condition", "type": "str"},
+ "value": {"key": "value", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ condition: Optional[Union[str, "_models.AutoDeleteCondition"]] = None,
+ value: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword condition: When to check if an asset is expired. Known values are:
+ "CreatedGreaterThan" and "LastAccessedGreaterThan".
+ :paramtype condition: str or ~azure.mgmt.machinelearningservices.models.AutoDeleteCondition
+ :keyword value: Expiration condition value.
+ :paramtype value: str
+ """
+ super().__init__(**kwargs)
+ self.condition = condition
+ self.value = value
+
+
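AutoDeleteSetting plugs into the asset and output models earlier in this hunk (AssetBase.auto_delete_setting, AssetJobOutput.auto_delete_setting). A minimal sketch; the condition string comes from the docstring above, while the value format below is an assumption to confirm against the service:

from azure.mgmt.machinelearningservices import models as _models

auto_delete = _models.AutoDeleteSetting(
    condition="CreatedGreaterThan",   # or the AutoDeleteCondition enum member
    value="P30D",                     # assumed ISO-8601 duration; verify with the service
)
output = _models.AssetJobOutput(
    asset_name="cleaned-data",
    asset_version="1",
    auto_delete_setting=auto_delete,
    mode="ReadWriteMount",
    uri="azureml://datastores/workspaceblobstore/paths/outputs/",
)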
class ForecastHorizon(_serialization.Model):
"""The desired maximum forecast horizon in units of time-series frequency.
@@ -1711,11 +2104,41 @@ def __init__(self, **kwargs: Any) -> None:
self.mode: str = "Auto"
+class AutologgerSettings(_serialization.Model):
+ """Settings for Autologger.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar mlflow_autologger: [Required] Indicates whether mlflow autologger is enabled. Required.
+ Known values are: "Enabled" and "Disabled".
+ :vartype mlflow_autologger: str or
+ ~azure.mgmt.machinelearningservices.models.MLFlowAutologgerState
+ """
+
+ _validation = {
+ "mlflow_autologger": {"required": True},
+ }
+
+ _attribute_map = {
+ "mlflow_autologger": {"key": "mlflowAutologger", "type": "str"},
+ }
+
+ def __init__(self, *, mlflow_autologger: Union[str, "_models.MLFlowAutologgerState"], **kwargs: Any) -> None:
+ """
+ :keyword mlflow_autologger: [Required] Indicates whether mlflow autologger is enabled.
+ Required. Known values are: "Enabled" and "Disabled".
+ :paramtype mlflow_autologger: str or
+ ~azure.mgmt.machinelearningservices.models.MLFlowAutologgerState
+ """
+ super().__init__(**kwargs)
+ self.mlflow_autologger = mlflow_autologger
+
+
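AutologgerSettings takes a single required keyword; either the enum member or its string value is accepted:

from azure.mgmt.machinelearningservices import models as _models

settings = _models.AutologgerSettings(mlflow_autologger="Enabled")
# equivalently: _models.AutologgerSettings(mlflow_autologger=_models.MLFlowAutologgerState.ENABLED)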
class JobBaseProperties(ResourceBase): # pylint: disable=too-many-instance-attributes
"""Base definition for a job.
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
- AutoMLJob, CommandJob, PipelineJob, SweepJob
+ AutoMLJob, CommandJob, LabelingJobProperties, PipelineJob, SparkJob, SweepJob
Variables are only populated by the server, and will be ignored when sending a request.
@@ -1743,14 +2166,19 @@ class JobBaseProperties(ResourceBase): # pylint: disable=too-many-instance-attr
:ivar is_archived: Is the asset archived?.
:vartype is_archived: bool
:ivar job_type: [Required] Specifies the type of job. Required. Known values are: "AutoML",
- "Command", "Sweep", and "Pipeline".
+ "Command", "Labeling", "Sweep", "Pipeline", and "Spark".
:vartype job_type: str or ~azure.mgmt.machinelearningservices.models.JobType
+ :ivar notification_setting: Notification setting for the job.
+ :vartype notification_setting: ~azure.mgmt.machinelearningservices.models.NotificationSetting
+ :ivar secrets_configuration: Configuration for secrets to be made available during runtime.
+ :vartype secrets_configuration: dict[str,
+ ~azure.mgmt.machinelearningservices.models.SecretConfiguration]
:ivar services: List of JobEndpoints.
For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
:vartype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
:ivar status: Status of the job. Known values are: "NotStarted", "Starting", "Provisioning",
"Preparing", "Queued", "Running", "Finalizing", "CancelRequested", "Completed", "Failed",
- "Canceled", "NotResponding", "Paused", and "Unknown".
+ "Canceled", "NotResponding", "Paused", "Unknown", and "Scheduled".
:vartype status: str or ~azure.mgmt.machinelearningservices.models.JobStatus
"""
@@ -1770,12 +2198,21 @@ class JobBaseProperties(ResourceBase): # pylint: disable=too-many-instance-attr
"identity": {"key": "identity", "type": "IdentityConfiguration"},
"is_archived": {"key": "isArchived", "type": "bool"},
"job_type": {"key": "jobType", "type": "str"},
+ "notification_setting": {"key": "notificationSetting", "type": "NotificationSetting"},
+ "secrets_configuration": {"key": "secretsConfiguration", "type": "{SecretConfiguration}"},
"services": {"key": "services", "type": "{JobService}"},
"status": {"key": "status", "type": "str"},
}
_subtype_map = {
- "job_type": {"AutoML": "AutoMLJob", "Command": "CommandJob", "Pipeline": "PipelineJob", "Sweep": "SweepJob"}
+ "job_type": {
+ "AutoML": "AutoMLJob",
+ "Command": "CommandJob",
+ "Labeling": "LabelingJobProperties",
+ "Pipeline": "PipelineJob",
+ "Spark": "SparkJob",
+ "Sweep": "SweepJob",
+ }
}
def __init__(
@@ -1790,6 +2227,8 @@ def __init__(
experiment_name: str = "Default",
identity: Optional["_models.IdentityConfiguration"] = None,
is_archived: bool = False,
+ notification_setting: Optional["_models.NotificationSetting"] = None,
+ secrets_configuration: Optional[Dict[str, "_models.SecretConfiguration"]] = None,
services: Optional[Dict[str, "_models.JobService"]] = None,
**kwargs: Any
) -> None:
@@ -1815,6 +2254,11 @@ def __init__(
:paramtype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
:keyword is_archived: Is the asset archived?.
:paramtype is_archived: bool
+ :keyword notification_setting: Notification setting for the job.
+ :paramtype notification_setting: ~azure.mgmt.machinelearningservices.models.NotificationSetting
+ :keyword secrets_configuration: Configuration for secrets to be made available during runtime.
+ :paramtype secrets_configuration: dict[str,
+ ~azure.mgmt.machinelearningservices.models.SecretConfiguration]
:keyword services: List of JobEndpoints.
For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
:paramtype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
@@ -1827,6 +2271,8 @@ def __init__(
self.identity = identity
self.is_archived = is_archived
self.job_type: Optional[str] = None
+ self.notification_setting = notification_setting
+ self.secrets_configuration = secrets_configuration
self.services = services
self.status = None
@@ -1862,14 +2308,19 @@ class AutoMLJob(JobBaseProperties): # pylint: disable=too-many-instance-attribu
:ivar is_archived: Is the asset archived?.
:vartype is_archived: bool
:ivar job_type: [Required] Specifies the type of job. Required. Known values are: "AutoML",
- "Command", "Sweep", and "Pipeline".
+ "Command", "Labeling", "Sweep", "Pipeline", and "Spark".
:vartype job_type: str or ~azure.mgmt.machinelearningservices.models.JobType
+ :ivar notification_setting: Notification setting for the job.
+ :vartype notification_setting: ~azure.mgmt.machinelearningservices.models.NotificationSetting
+ :ivar secrets_configuration: Configuration for secrets to be made available during runtime.
+ :vartype secrets_configuration: dict[str,
+ ~azure.mgmt.machinelearningservices.models.SecretConfiguration]
:ivar services: List of JobEndpoints.
For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
:vartype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
:ivar status: Status of the job. Known values are: "NotStarted", "Starting", "Provisioning",
"Preparing", "Queued", "Running", "Finalizing", "CancelRequested", "Completed", "Failed",
- "Canceled", "NotResponding", "Paused", and "Unknown".
+ "Canceled", "NotResponding", "Paused", "Unknown", and "Scheduled".
:vartype status: str or ~azure.mgmt.machinelearningservices.models.JobStatus
:ivar environment_id: The ARM resource ID of the Environment specification for the job.
This is optional value to provide, if not provided, AutoML will default this to Production
@@ -1879,6 +2330,8 @@ class AutoMLJob(JobBaseProperties): # pylint: disable=too-many-instance-attribu
:vartype environment_variables: dict[str, str]
:ivar outputs: Mapping of output data bindings used in the job.
:vartype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
+ :ivar queue_settings: Queue settings for the job.
+ :vartype queue_settings: ~azure.mgmt.machinelearningservices.models.QueueSettings
:ivar resources: Compute Resource configuration for the job.
:vartype resources: ~azure.mgmt.machinelearningservices.models.JobResourceConfiguration
:ivar task_details: [Required] This represents scenario which can be one of Tables/NLP/Image.
@@ -1903,11 +2356,14 @@ class AutoMLJob(JobBaseProperties): # pylint: disable=too-many-instance-attribu
"identity": {"key": "identity", "type": "IdentityConfiguration"},
"is_archived": {"key": "isArchived", "type": "bool"},
"job_type": {"key": "jobType", "type": "str"},
+ "notification_setting": {"key": "notificationSetting", "type": "NotificationSetting"},
+ "secrets_configuration": {"key": "secretsConfiguration", "type": "{SecretConfiguration}"},
"services": {"key": "services", "type": "{JobService}"},
"status": {"key": "status", "type": "str"},
"environment_id": {"key": "environmentId", "type": "str"},
"environment_variables": {"key": "environmentVariables", "type": "{str}"},
"outputs": {"key": "outputs", "type": "{JobOutput}"},
+ "queue_settings": {"key": "queueSettings", "type": "QueueSettings"},
"resources": {"key": "resources", "type": "JobResourceConfiguration"},
"task_details": {"key": "taskDetails", "type": "AutoMLVertical"},
}
@@ -1925,10 +2381,13 @@ def __init__(
experiment_name: str = "Default",
identity: Optional["_models.IdentityConfiguration"] = None,
is_archived: bool = False,
+ notification_setting: Optional["_models.NotificationSetting"] = None,
+ secrets_configuration: Optional[Dict[str, "_models.SecretConfiguration"]] = None,
services: Optional[Dict[str, "_models.JobService"]] = None,
environment_id: Optional[str] = None,
environment_variables: Optional[Dict[str, str]] = None,
outputs: Optional[Dict[str, "_models.JobOutput"]] = None,
+ queue_settings: Optional["_models.QueueSettings"] = None,
resources: Optional["_models.JobResourceConfiguration"] = None,
**kwargs: Any
) -> None:
@@ -1954,6 +2413,11 @@ def __init__(
:paramtype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
:keyword is_archived: Is the asset archived?.
:paramtype is_archived: bool
+ :keyword notification_setting: Notification setting for the job.
+ :paramtype notification_setting: ~azure.mgmt.machinelearningservices.models.NotificationSetting
+ :keyword secrets_configuration: Configuration for secrets to be made available during runtime.
+ :paramtype secrets_configuration: dict[str,
+ ~azure.mgmt.machinelearningservices.models.SecretConfiguration]
:keyword services: List of JobEndpoints.
For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
:paramtype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
@@ -1965,6 +2429,8 @@ def __init__(
:paramtype environment_variables: dict[str, str]
:keyword outputs: Mapping of output data bindings used in the job.
:paramtype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
+ :keyword queue_settings: Queue settings for the job.
+ :paramtype queue_settings: ~azure.mgmt.machinelearningservices.models.QueueSettings
:keyword resources: Compute Resource configuration for the job.
:paramtype resources: ~azure.mgmt.machinelearningservices.models.JobResourceConfiguration
:keyword task_details: [Required] This represents scenario which can be one of
@@ -1981,6 +2447,8 @@ def __init__(
experiment_name=experiment_name,
identity=identity,
is_archived=is_archived,
+ notification_setting=notification_setting,
+ secrets_configuration=secrets_configuration,
services=services,
**kwargs
)
@@ -1988,6 +2456,7 @@ def __init__(
self.environment_id = environment_id
self.environment_variables = environment_variables
self.outputs = outputs
+ self.queue_settings = queue_settings
self.resources = resources
self.task_details = task_details
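A sketch of the new keyword-only arguments threaded from JobBaseProperties into AutoMLJob (notification_setting, secrets_configuration, queue_settings). The QueueSettings and NotificationSetting keyword names are assumptions, since those models are defined outside this hunk:

from azure.mgmt.machinelearningservices import models as _models

job = _models.AutoMLJob(
    task_details=_models.Classification(
        training_data=_models.MLTableJobInput(uri="azureml://datastores/data/paths/train/"),
        target_column_name="label",
    ),
    queue_settings=_models.QueueSettings(job_tier="Standard"),      # keyword assumed
    notification_setting=_models.NotificationSetting(
        email_on=["JobFailed"], emails=["mlops@contoso.com"]        # keywords assumed
    ),
)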
@@ -2351,7 +2820,8 @@ class DatastoreProperties(ResourceBase):
"""Base definition for datastore contents configuration.
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
- AzureBlobDatastore, AzureDataLakeGen1Datastore, AzureDataLakeGen2Datastore, AzureFileDatastore
+ AzureBlobDatastore, AzureDataLakeGen1Datastore, AzureDataLakeGen2Datastore, AzureFileDatastore,
+ HdfsDatastore, OneLakeDatastore
Variables are only populated by the server, and will be ignored when sending a request.
@@ -2366,8 +2836,10 @@ class DatastoreProperties(ResourceBase):
:ivar credentials: [Required] Account credentials. Required.
:vartype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
:ivar datastore_type: [Required] Storage type backing the datastore. Required. Known values
- are: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2", and "AzureFile".
+ are: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2", "AzureFile", "Hdfs", and "OneLake".
:vartype datastore_type: str or ~azure.mgmt.machinelearningservices.models.DatastoreType
+ :ivar intellectual_property: Intellectual Property details.
+ :vartype intellectual_property: ~azure.mgmt.machinelearningservices.models.IntellectualProperty
:ivar is_default: Readonly property to indicate if datastore is the workspace default
datastore.
:vartype is_default: bool
@@ -2385,6 +2857,7 @@ class DatastoreProperties(ResourceBase):
"tags": {"key": "tags", "type": "{str}"},
"credentials": {"key": "credentials", "type": "DatastoreCredentials"},
"datastore_type": {"key": "datastoreType", "type": "str"},
+ "intellectual_property": {"key": "intellectualProperty", "type": "IntellectualProperty"},
"is_default": {"key": "isDefault", "type": "bool"},
}
@@ -2394,6 +2867,8 @@ class DatastoreProperties(ResourceBase):
"AzureDataLakeGen1": "AzureDataLakeGen1Datastore",
"AzureDataLakeGen2": "AzureDataLakeGen2Datastore",
"AzureFile": "AzureFileDatastore",
+ "Hdfs": "HdfsDatastore",
+ "OneLake": "OneLakeDatastore",
}
}
@@ -2404,6 +2879,7 @@ def __init__(
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
+ intellectual_property: Optional["_models.IntellectualProperty"] = None,
**kwargs: Any
) -> None:
"""
@@ -2415,14 +2891,46 @@ def __init__(
:paramtype tags: dict[str, str]
:keyword credentials: [Required] Account credentials. Required.
:paramtype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
+ :keyword intellectual_property: Intellectual Property details.
+ :paramtype intellectual_property:
+ ~azure.mgmt.machinelearningservices.models.IntellectualProperty
"""
super().__init__(description=description, properties=properties, tags=tags, **kwargs)
self.credentials = credentials
self.datastore_type: Optional[str] = None
+ self.intellectual_property = intellectual_property
self.is_default = None
-class AzureBlobDatastore(DatastoreProperties): # pylint: disable=too-many-instance-attributes
+class AzureDatastore(_serialization.Model):
+ """Base definition for Azure datastore contents configuration.
+
+ :ivar resource_group: Azure Resource Group name.
+ :vartype resource_group: str
+ :ivar subscription_id: Azure Subscription Id.
+ :vartype subscription_id: str
+ """
+
+ _attribute_map = {
+ "resource_group": {"key": "resourceGroup", "type": "str"},
+ "subscription_id": {"key": "subscriptionId", "type": "str"},
+ }
+
+ def __init__(
+ self, *, resource_group: Optional[str] = None, subscription_id: Optional[str] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword resource_group: Azure Resource Group name.
+ :paramtype resource_group: str
+ :keyword subscription_id: Azure Subscription Id.
+ :paramtype subscription_id: str
+ """
+ super().__init__(**kwargs)
+ self.resource_group = resource_group
+ self.subscription_id = subscription_id
+
+
+class AzureBlobDatastore(AzureDatastore, DatastoreProperties): # pylint: disable=too-many-instance-attributes
"""Azure Blob datastore configuration.
Variables are only populated by the server, and will be ignored when sending a request.
@@ -2438,11 +2946,17 @@ class AzureBlobDatastore(DatastoreProperties): # pylint: disable=too-many-insta
:ivar credentials: [Required] Account credentials. Required.
:vartype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
:ivar datastore_type: [Required] Storage type backing the datastore. Required. Known values
- are: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2", and "AzureFile".
+ are: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2", "AzureFile", "Hdfs", and "OneLake".
:vartype datastore_type: str or ~azure.mgmt.machinelearningservices.models.DatastoreType
+ :ivar intellectual_property: Intellectual Property details.
+ :vartype intellectual_property: ~azure.mgmt.machinelearningservices.models.IntellectualProperty
:ivar is_default: Readonly property to indicate if datastore is the workspace default
datastore.
:vartype is_default: bool
+ :ivar resource_group: Azure Resource Group name.
+ :vartype resource_group: str
+ :ivar subscription_id: Azure Subscription Id.
+ :vartype subscription_id: str
:ivar account_name: Storage account name.
:vartype account_name: str
:ivar container_name: Storage account container name.
@@ -2470,7 +2984,10 @@ class AzureBlobDatastore(DatastoreProperties): # pylint: disable=too-many-insta
"tags": {"key": "tags", "type": "{str}"},
"credentials": {"key": "credentials", "type": "DatastoreCredentials"},
"datastore_type": {"key": "datastoreType", "type": "str"},
+ "intellectual_property": {"key": "intellectualProperty", "type": "IntellectualProperty"},
"is_default": {"key": "isDefault", "type": "bool"},
+ "resource_group": {"key": "resourceGroup", "type": "str"},
+ "subscription_id": {"key": "subscriptionId", "type": "str"},
"account_name": {"key": "accountName", "type": "str"},
"container_name": {"key": "containerName", "type": "str"},
"endpoint": {"key": "endpoint", "type": "str"},
@@ -2485,6 +3002,9 @@ def __init__(
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
+ intellectual_property: Optional["_models.IntellectualProperty"] = None,
+ resource_group: Optional[str] = None,
+ subscription_id: Optional[str] = None,
account_name: Optional[str] = None,
container_name: Optional[str] = None,
endpoint: Optional[str] = None,
@@ -2501,6 +3021,13 @@ def __init__(
:paramtype tags: dict[str, str]
:keyword credentials: [Required] Account credentials. Required.
:paramtype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
+ :keyword intellectual_property: Intellectual Property details.
+ :paramtype intellectual_property:
+ ~azure.mgmt.machinelearningservices.models.IntellectualProperty
+ :keyword resource_group: Azure Resource Group name.
+ :paramtype resource_group: str
+ :keyword subscription_id: Azure Subscription Id.
+ :paramtype subscription_id: str
:keyword account_name: Storage account name.
:paramtype account_name: str
:keyword container_name: Storage account container name.
@@ -2515,16 +3042,33 @@ def __init__(
:paramtype service_data_access_auth_identity: str or
~azure.mgmt.machinelearningservices.models.ServiceDataAccessAuthIdentity
"""
- super().__init__(description=description, properties=properties, tags=tags, credentials=credentials, **kwargs)
+ super().__init__(
+ resource_group=resource_group,
+ subscription_id=subscription_id,
+ description=description,
+ properties=properties,
+ tags=tags,
+ credentials=credentials,
+ intellectual_property=intellectual_property,
+ **kwargs
+ )
+ self.description = description
+ self.properties = properties
+ self.tags = tags
+ self.credentials = credentials
self.datastore_type: str = "AzureBlob"
+ self.intellectual_property = intellectual_property
+ self.is_default = None
self.account_name = account_name
self.container_name = container_name
self.endpoint = endpoint
self.protocol = protocol
self.service_data_access_auth_identity = service_data_access_auth_identity
+ self.resource_group = resource_group
+ self.subscription_id = subscription_id
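With AzureBlobDatastore now deriving from both AzureDatastore and DatastoreProperties, the Azure-scoping fields sit next to the blob-specific ones. A minimal construction sketch using credential-less (identity-based) access:

from azure.mgmt.machinelearningservices import models as _models

blob_store = _models.AzureBlobDatastore(
    credentials=_models.NoneDatastoreCredentials(),   # identity-based access
    account_name="mystorageacct",
    container_name="training-data",
    endpoint="core.windows.net",
    protocol="https",
    resource_group="my-rg",                            # new AzureDatastore fields
    subscription_id="00000000-0000-0000-0000-000000000000",
)
assert blob_store.datastore_type == "AzureBlob"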
-class AzureDataLakeGen1Datastore(DatastoreProperties):
+class AzureDataLakeGen1Datastore(AzureDatastore, DatastoreProperties): # pylint: disable=too-many-instance-attributes
"""Azure Data Lake Gen1 datastore configuration.
Variables are only populated by the server, and will be ignored when sending a request.
@@ -2540,11 +3084,17 @@ class AzureDataLakeGen1Datastore(DatastoreProperties):
:ivar credentials: [Required] Account credentials. Required.
:vartype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
:ivar datastore_type: [Required] Storage type backing the datastore. Required. Known values
- are: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2", and "AzureFile".
+ are: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2", "AzureFile", "Hdfs", and "OneLake".
:vartype datastore_type: str or ~azure.mgmt.machinelearningservices.models.DatastoreType
+ :ivar intellectual_property: Intellectual Property details.
+ :vartype intellectual_property: ~azure.mgmt.machinelearningservices.models.IntellectualProperty
:ivar is_default: Readonly property to indicate if datastore is the workspace default
datastore.
:vartype is_default: bool
+ :ivar resource_group: Azure Resource Group name.
+ :vartype resource_group: str
+ :ivar subscription_id: Azure Subscription Id.
+ :vartype subscription_id: str
:ivar service_data_access_auth_identity: Indicates which identity to use to authenticate
service data access to customer's storage. Known values are: "None",
"WorkspaceSystemAssignedIdentity", and "WorkspaceUserAssignedIdentity".
@@ -2567,7 +3117,10 @@ class AzureDataLakeGen1Datastore(DatastoreProperties):
"tags": {"key": "tags", "type": "{str}"},
"credentials": {"key": "credentials", "type": "DatastoreCredentials"},
"datastore_type": {"key": "datastoreType", "type": "str"},
+ "intellectual_property": {"key": "intellectualProperty", "type": "IntellectualProperty"},
"is_default": {"key": "isDefault", "type": "bool"},
+ "resource_group": {"key": "resourceGroup", "type": "str"},
+ "subscription_id": {"key": "subscriptionId", "type": "str"},
"service_data_access_auth_identity": {"key": "serviceDataAccessAuthIdentity", "type": "str"},
"store_name": {"key": "storeName", "type": "str"},
}
@@ -2580,6 +3133,9 @@ def __init__(
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
+ intellectual_property: Optional["_models.IntellectualProperty"] = None,
+ resource_group: Optional[str] = None,
+ subscription_id: Optional[str] = None,
service_data_access_auth_identity: Optional[Union[str, "_models.ServiceDataAccessAuthIdentity"]] = None,
**kwargs: Any
) -> None:
@@ -2592,6 +3148,13 @@ def __init__(
:paramtype tags: dict[str, str]
:keyword credentials: [Required] Account credentials. Required.
:paramtype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
+ :keyword intellectual_property: Intellectual Property details.
+ :paramtype intellectual_property:
+ ~azure.mgmt.machinelearningservices.models.IntellectualProperty
+ :keyword resource_group: Azure Resource Group name.
+ :paramtype resource_group: str
+ :keyword subscription_id: Azure Subscription Id.
+ :paramtype subscription_id: str
:keyword service_data_access_auth_identity: Indicates which identity to use to authenticate
service data access to customer's storage. Known values are: "None",
"WorkspaceSystemAssignedIdentity", and "WorkspaceUserAssignedIdentity".
@@ -2600,13 +3163,30 @@ def __init__(
:keyword store_name: [Required] Azure Data Lake store name. Required.
:paramtype store_name: str
"""
- super().__init__(description=description, properties=properties, tags=tags, credentials=credentials, **kwargs)
+ super().__init__(
+ resource_group=resource_group,
+ subscription_id=subscription_id,
+ description=description,
+ properties=properties,
+ tags=tags,
+ credentials=credentials,
+ intellectual_property=intellectual_property,
+ **kwargs
+ )
+ self.description = description
+ self.properties = properties
+ self.tags = tags
+ self.credentials = credentials
self.datastore_type: str = "AzureDataLakeGen1"
+ self.intellectual_property = intellectual_property
+ self.is_default = None
self.service_data_access_auth_identity = service_data_access_auth_identity
self.store_name = store_name
+ self.resource_group = resource_group
+ self.subscription_id = subscription_id
-class AzureDataLakeGen2Datastore(DatastoreProperties): # pylint: disable=too-many-instance-attributes
+class AzureDataLakeGen2Datastore(AzureDatastore, DatastoreProperties): # pylint: disable=too-many-instance-attributes
"""Azure Data Lake Gen2 datastore configuration.
Variables are only populated by the server, and will be ignored when sending a request.
@@ -2622,11 +3202,17 @@ class AzureDataLakeGen2Datastore(DatastoreProperties): # pylint: disable=too-ma
:ivar credentials: [Required] Account credentials. Required.
:vartype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
:ivar datastore_type: [Required] Storage type backing the datastore. Required. Known values
- are: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2", and "AzureFile".
+ are: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2", "AzureFile", "Hdfs", and "OneLake".
:vartype datastore_type: str or ~azure.mgmt.machinelearningservices.models.DatastoreType
+ :ivar intellectual_property: Intellectual Property details.
+ :vartype intellectual_property: ~azure.mgmt.machinelearningservices.models.IntellectualProperty
:ivar is_default: Readonly property to indicate if datastore is the workspace default
datastore.
:vartype is_default: bool
+ :ivar resource_group: Azure Resource Group name.
+ :vartype resource_group: str
+ :ivar subscription_id: Azure Subscription Id.
+ :vartype subscription_id: str
:ivar account_name: [Required] Storage account name. Required.
:vartype account_name: str
:ivar endpoint: Azure cloud endpoint for the storage account.
@@ -2656,7 +3242,10 @@ class AzureDataLakeGen2Datastore(DatastoreProperties): # pylint: disable=too-ma
"tags": {"key": "tags", "type": "{str}"},
"credentials": {"key": "credentials", "type": "DatastoreCredentials"},
"datastore_type": {"key": "datastoreType", "type": "str"},
+ "intellectual_property": {"key": "intellectualProperty", "type": "IntellectualProperty"},
"is_default": {"key": "isDefault", "type": "bool"},
+ "resource_group": {"key": "resourceGroup", "type": "str"},
+ "subscription_id": {"key": "subscriptionId", "type": "str"},
"account_name": {"key": "accountName", "type": "str"},
"endpoint": {"key": "endpoint", "type": "str"},
"filesystem": {"key": "filesystem", "type": "str"},
@@ -2673,6 +3262,9 @@ def __init__(
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
+ intellectual_property: Optional["_models.IntellectualProperty"] = None,
+ resource_group: Optional[str] = None,
+ subscription_id: Optional[str] = None,
endpoint: Optional[str] = None,
protocol: Optional[str] = None,
service_data_access_auth_identity: Optional[Union[str, "_models.ServiceDataAccessAuthIdentity"]] = None,
@@ -2687,6 +3279,13 @@ def __init__(
:paramtype tags: dict[str, str]
:keyword credentials: [Required] Account credentials. Required.
:paramtype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
+ :keyword intellectual_property: Intellectual Property details.
+ :paramtype intellectual_property:
+ ~azure.mgmt.machinelearningservices.models.IntellectualProperty
+ :keyword resource_group: Azure Resource Group name.
+ :paramtype resource_group: str
+ :keyword subscription_id: Azure Subscription Id.
+ :paramtype subscription_id: str
:keyword account_name: [Required] Storage account name. Required.
:paramtype account_name: str
:keyword endpoint: Azure cloud endpoint for the storage account.
@@ -2701,16 +3300,99 @@ def __init__(
:paramtype service_data_access_auth_identity: str or
~azure.mgmt.machinelearningservices.models.ServiceDataAccessAuthIdentity
"""
- super().__init__(description=description, properties=properties, tags=tags, credentials=credentials, **kwargs)
+ super().__init__(
+ resource_group=resource_group,
+ subscription_id=subscription_id,
+ description=description,
+ properties=properties,
+ tags=tags,
+ credentials=credentials,
+ intellectual_property=intellectual_property,
+ **kwargs
+ )
+ self.description = description
+ self.properties = properties
+ self.tags = tags
+ self.credentials = credentials
self.datastore_type: str = "AzureDataLakeGen2"
+ self.intellectual_property = intellectual_property
+ self.is_default = None
self.account_name = account_name
self.endpoint = endpoint
self.filesystem = filesystem
self.protocol = protocol
self.service_data_access_auth_identity = service_data_access_auth_identity
+ self.resource_group = resource_group
+ self.subscription_id = subscription_id
+
+
+class Webhook(_serialization.Model):
+ """Webhook base.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ AzureDevOpsWebhook
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar event_type: Send callback on a specified notification event.
+ :vartype event_type: str
+ :ivar webhook_type: [Required] Specifies the type of service to send a callback. Required.
+ "AzureDevOps"
+ :vartype webhook_type: str or ~azure.mgmt.machinelearningservices.models.WebhookType
+ """
+
+ _validation = {
+ "webhook_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "event_type": {"key": "eventType", "type": "str"},
+ "webhook_type": {"key": "webhookType", "type": "str"},
+ }
+
+ _subtype_map = {"webhook_type": {"AzureDevOps": "AzureDevOpsWebhook"}}
+
+ def __init__(self, *, event_type: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword event_type: Send callback on a specified notification event.
+ :paramtype event_type: str
+ """
+ super().__init__(**kwargs)
+ self.event_type = event_type
+ self.webhook_type: Optional[str] = None
+
+
+class AzureDevOpsWebhook(Webhook):
+ """Webhook details specific for Azure DevOps.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar event_type: Send callback on a specified notification event.
+ :vartype event_type: str
+ :ivar webhook_type: [Required] Specifies the type of service to send a callback. Required.
+ "AzureDevOps"
+ :vartype webhook_type: str or ~azure.mgmt.machinelearningservices.models.WebhookType
+ """
+
+ _validation = {
+ "webhook_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "event_type": {"key": "eventType", "type": "str"},
+ "webhook_type": {"key": "webhookType", "type": "str"},
+ }
+
+ def __init__(self, *, event_type: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword event_type: Send callback on a specified notification event.
+ :paramtype event_type: str
+ """
+ super().__init__(event_type=event_type, **kwargs)
+ self.webhook_type: str = "AzureDevOps"
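
# Sketch (editorial): AzureDevOpsWebhook only takes the optional event_type; the
# webhook_type discriminator is fixed by the subclass. The event name below is a
# placeholder, and how the webhook is attached to a notification setting is outside
# this hunk.
from azure.mgmt.machinelearningservices import models

webhook = models.AzureDevOpsWebhook(event_type="RunCompleted")
assert webhook.webhook_type == "AzureDevOps"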
-class AzureFileDatastore(DatastoreProperties): # pylint: disable=too-many-instance-attributes
+
+class AzureFileDatastore(AzureDatastore, DatastoreProperties): # pylint: disable=too-many-instance-attributes
"""Azure File datastore configuration.
Variables are only populated by the server, and will be ignored when sending a request.
@@ -2726,11 +3408,17 @@ class AzureFileDatastore(DatastoreProperties): # pylint: disable=too-many-insta
:ivar credentials: [Required] Account credentials. Required.
:vartype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
:ivar datastore_type: [Required] Storage type backing the datastore. Required. Known values
- are: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2", and "AzureFile".
+ are: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2", "AzureFile", "Hdfs", and "OneLake".
:vartype datastore_type: str or ~azure.mgmt.machinelearningservices.models.DatastoreType
+ :ivar intellectual_property: Intellectual Property details.
+ :vartype intellectual_property: ~azure.mgmt.machinelearningservices.models.IntellectualProperty
:ivar is_default: Readonly property to indicate if datastore is the workspace default
datastore.
:vartype is_default: bool
+ :ivar resource_group: Azure Resource Group name.
+ :vartype resource_group: str
+ :ivar subscription_id: Azure Subscription Id.
+ :vartype subscription_id: str
:ivar account_name: [Required] Storage account name. Required.
:vartype account_name: str
:ivar endpoint: Azure cloud endpoint for the storage account.
@@ -2761,7 +3449,10 @@ class AzureFileDatastore(DatastoreProperties): # pylint: disable=too-many-insta
"tags": {"key": "tags", "type": "{str}"},
"credentials": {"key": "credentials", "type": "DatastoreCredentials"},
"datastore_type": {"key": "datastoreType", "type": "str"},
+ "intellectual_property": {"key": "intellectualProperty", "type": "IntellectualProperty"},
"is_default": {"key": "isDefault", "type": "bool"},
+ "resource_group": {"key": "resourceGroup", "type": "str"},
+ "subscription_id": {"key": "subscriptionId", "type": "str"},
"account_name": {"key": "accountName", "type": "str"},
"endpoint": {"key": "endpoint", "type": "str"},
"file_share_name": {"key": "fileShareName", "type": "str"},
@@ -2778,6 +3469,9 @@ def __init__(
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
+ intellectual_property: Optional["_models.IntellectualProperty"] = None,
+ resource_group: Optional[str] = None,
+ subscription_id: Optional[str] = None,
endpoint: Optional[str] = None,
protocol: Optional[str] = None,
service_data_access_auth_identity: Optional[Union[str, "_models.ServiceDataAccessAuthIdentity"]] = None,
@@ -2792,6 +3486,13 @@ def __init__(
:paramtype tags: dict[str, str]
:keyword credentials: [Required] Account credentials. Required.
:paramtype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
+ :keyword intellectual_property: Intellectual Property details.
+ :paramtype intellectual_property:
+ ~azure.mgmt.machinelearningservices.models.IntellectualProperty
+ :keyword resource_group: Azure Resource Group name.
+ :paramtype resource_group: str
+ :keyword subscription_id: Azure Subscription Id.
+ :paramtype subscription_id: str
:keyword account_name: [Required] Storage account name. Required.
:paramtype account_name: str
:keyword endpoint: Azure cloud endpoint for the storage account.
@@ -2807,45 +3508,161 @@ def __init__(
:paramtype service_data_access_auth_identity: str or
~azure.mgmt.machinelearningservices.models.ServiceDataAccessAuthIdentity
"""
- super().__init__(description=description, properties=properties, tags=tags, credentials=credentials, **kwargs)
+ super().__init__(
+ resource_group=resource_group,
+ subscription_id=subscription_id,
+ description=description,
+ properties=properties,
+ tags=tags,
+ credentials=credentials,
+ intellectual_property=intellectual_property,
+ **kwargs
+ )
+ self.description = description
+ self.properties = properties
+ self.tags = tags
+ self.credentials = credentials
self.datastore_type: str = "AzureFile"
+ self.intellectual_property = intellectual_property
+ self.is_default = None
self.account_name = account_name
self.endpoint = endpoint
self.file_share_name = file_share_name
self.protocol = protocol
self.service_data_access_auth_identity = service_data_access_auth_identity
+ self.resource_group = resource_group
+ self.subscription_id = subscription_id
-class EarlyTerminationPolicy(_serialization.Model):
- """Early termination policies enable canceling poor-performing runs before they complete.
+class InferencingServer(_serialization.Model):
+ """Base configuration for an inferencing server.
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
- BanditPolicy, MedianStoppingPolicy, TruncationSelectionPolicy
+ AzureMLBatchInferencingServer, AzureMLOnlineInferencingServer, CustomInferencingServer,
+ TritonInferencingServer
All required parameters must be populated in order to send to Azure.
- :ivar delay_evaluation: Number of intervals by which to delay the first evaluation.
- :vartype delay_evaluation: int
- :ivar evaluation_interval: Interval (number of runs) between policy evaluations.
- :vartype evaluation_interval: int
- :ivar policy_type: [Required] Name of policy configuration. Required. Known values are:
- "Bandit", "MedianStopping", and "TruncationSelection".
- :vartype policy_type: str or
- ~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicyType
+ :ivar server_type: [Required] Inferencing server type for various targets. Required. Known
+ values are: "AzureMLOnline", "AzureMLBatch", "Triton", and "Custom".
+ :vartype server_type: str or ~azure.mgmt.machinelearningservices.models.InferencingServerType
"""
_validation = {
- "policy_type": {"required": True},
+ "server_type": {"required": True},
}
_attribute_map = {
- "delay_evaluation": {"key": "delayEvaluation", "type": "int"},
- "evaluation_interval": {"key": "evaluationInterval", "type": "int"},
- "policy_type": {"key": "policyType", "type": "str"},
+ "server_type": {"key": "serverType", "type": "str"},
}
_subtype_map = {
- "policy_type": {
+ "server_type": {
+ "AzureMLBatch": "AzureMLBatchInferencingServer",
+ "AzureMLOnline": "AzureMLOnlineInferencingServer",
+ "Custom": "CustomInferencingServer",
+ "Triton": "TritonInferencingServer",
+ }
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.server_type: Optional[str] = None
+
+
+class AzureMLBatchInferencingServer(InferencingServer):
+ """Azure ML batch inferencing server configurations.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar server_type: [Required] Inferencing server type for various targets. Required. Known
+ values are: "AzureMLOnline", "AzureMLBatch", "Triton", and "Custom".
+ :vartype server_type: str or ~azure.mgmt.machinelearningservices.models.InferencingServerType
+ :ivar code_configuration: Code configuration for AML batch inferencing server.
+ :vartype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
+ """
+
+ _validation = {
+ "server_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "server_type": {"key": "serverType", "type": "str"},
+ "code_configuration": {"key": "codeConfiguration", "type": "CodeConfiguration"},
+ }
+
+ def __init__(self, *, code_configuration: Optional["_models.CodeConfiguration"] = None, **kwargs: Any) -> None:
+ """
+ :keyword code_configuration: Code configuration for AML batch inferencing server.
+ :paramtype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
+ """
+ super().__init__(**kwargs)
+ self.server_type: str = "AzureMLBatch"
+ self.code_configuration = code_configuration
+
+
+class AzureMLOnlineInferencingServer(InferencingServer):
+ """Azure ML online inferencing configurations.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar server_type: [Required] Inferencing server type for various targets. Required. Known
+ values are: "AzureMLOnline", "AzureMLBatch", "Triton", and "Custom".
+ :vartype server_type: str or ~azure.mgmt.machinelearningservices.models.InferencingServerType
+ :ivar code_configuration: Code configuration for AML inferencing server.
+ :vartype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
+ """
+
+ _validation = {
+ "server_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "server_type": {"key": "serverType", "type": "str"},
+ "code_configuration": {"key": "codeConfiguration", "type": "CodeConfiguration"},
+ }
+
+ def __init__(self, *, code_configuration: Optional["_models.CodeConfiguration"] = None, **kwargs: Any) -> None:
+ """
+ :keyword code_configuration: Code configuration for AML inferencing server.
+ :paramtype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
+ """
+ super().__init__(**kwargs)
+ self.server_type: str = "AzureMLOnline"
+ self.code_configuration = code_configuration
+
+
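# Sketch (editorial) for the two AzureML inferencing server flavours added above.
# The scoring scripts are placeholders and CodeConfiguration is assumed to keep its
# existing scoring_script/code_id shape.
from azure.mgmt.machinelearningservices import models

online_server = models.AzureMLOnlineInferencingServer(
    code_configuration=models.CodeConfiguration(scoring_script="score.py")
)
batch_server = models.AzureMLBatchInferencingServer(
    code_configuration=models.CodeConfiguration(scoring_script="batch_driver.py")
)
# Each subclass pins serverType, which drives polymorphic (de)serialization.
assert online_server.server_type == "AzureMLOnline"
assert batch_server.server_type == "AzureMLBatch"
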
+class EarlyTerminationPolicy(_serialization.Model):
+ """Early termination policies enable canceling poor-performing runs before they complete.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ BanditPolicy, MedianStoppingPolicy, TruncationSelectionPolicy
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar delay_evaluation: Number of intervals by which to delay the first evaluation.
+ :vartype delay_evaluation: int
+ :ivar evaluation_interval: Interval (number of runs) between policy evaluations.
+ :vartype evaluation_interval: int
+ :ivar policy_type: [Required] Name of policy configuration. Required. Known values are:
+ "Bandit", "MedianStopping", and "TruncationSelection".
+ :vartype policy_type: str or
+ ~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicyType
+ """
+
+ _validation = {
+ "policy_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "delay_evaluation": {"key": "delayEvaluation", "type": "int"},
+ "evaluation_interval": {"key": "evaluationInterval", "type": "int"},
+ "policy_type": {"key": "policyType", "type": "str"},
+ }
+
+ _subtype_map = {
+ "policy_type": {
"Bandit": "BanditPolicy",
"MedianStopping": "MedianStoppingPolicy",
"TruncationSelection": "TruncationSelectionPolicy",
@@ -2922,6 +3739,69 @@ def __init__(
self.slack_factor = slack_factor
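
# Sketch (editorial) for the early-termination hierarchy re-listed above: a
# BanditPolicy is one of the three policy_type subclasses. Parameter names follow
# the BanditPolicy model in this package; the numeric values are arbitrary.
from azure.mgmt.machinelearningservices import models

policy = models.BanditPolicy(
    slack_factor=0.1,       # allowed slack relative to the best run so far
    evaluation_interval=1,  # evaluate after every interval
    delay_evaluation=5,     # skip the first five intervals
)
assert policy.policy_type == "Bandit"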
+class BaseEnvironmentSource(_serialization.Model):
+ """BaseEnvironmentSource.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ BaseEnvironmentId
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar base_environment_source_type: [Required] Base environment type. Required.
+ "EnvironmentAsset"
+ :vartype base_environment_source_type: str or
+ ~azure.mgmt.machinelearningservices.models.BaseEnvironmentSourceType
+ """
+
+ _validation = {
+ "base_environment_source_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "base_environment_source_type": {"key": "baseEnvironmentSourceType", "type": "str"},
+ }
+
+ _subtype_map = {"base_environment_source_type": {"EnvironmentAsset": "BaseEnvironmentId"}}
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.base_environment_source_type: Optional[str] = None
+
+
+class BaseEnvironmentId(BaseEnvironmentSource):
+ """Base environment type.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar base_environment_source_type: [Required] Base environment type. Required.
+ "EnvironmentAsset"
+ :vartype base_environment_source_type: str or
+ ~azure.mgmt.machinelearningservices.models.BaseEnvironmentSourceType
+ :ivar resource_id: [Required] Resource id accepting ArmId or AzureMlId. Required.
+ :vartype resource_id: str
+ """
+
+ _validation = {
+ "base_environment_source_type": {"required": True},
+ "resource_id": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
+ _attribute_map = {
+ "base_environment_source_type": {"key": "baseEnvironmentSourceType", "type": "str"},
+ "resource_id": {"key": "resourceId", "type": "str"},
+ }
+
+ def __init__(self, *, resource_id: str, **kwargs: Any) -> None:
+ """
+ :keyword resource_id: [Required] Resource id accepting ArmId or AzureMlId. Required.
+ :paramtype resource_id: str
+ """
+ super().__init__(**kwargs)
+ self.base_environment_source_type: str = "EnvironmentAsset"
+ self.resource_id = resource_id
+
+
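# Sketch (editorial): BaseEnvironmentId wraps a reference to an environment asset;
# the id below is a made-up placeholder that merely satisfies the documented
# min_length/pattern validation on resource_id.
from azure.mgmt.machinelearningservices import models

base_env = models.BaseEnvironmentId(
    resource_id="azureml://registries/azureml/environments/minimal/versions/1"
)
assert base_env.base_environment_source_type == "EnvironmentAsset"
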
class Resource(_serialization.Model):
"""Common fields that are returned in the response for all Azure Resource Manager resources.
@@ -3105,6 +3985,38 @@ def __init__(
self.sku = sku
+class BatchDeploymentConfiguration(_serialization.Model):
+ """Properties relevant to different deployment types.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ BatchPipelineComponentDeploymentConfiguration
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar deployment_configuration_type: [Required] The type of the deployment. Required. Known
+ values are: "Model" and "PipelineComponent".
+ :vartype deployment_configuration_type: str or
+ ~azure.mgmt.machinelearningservices.models.BatchDeploymentConfigurationType
+ """
+
+ _validation = {
+ "deployment_configuration_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "deployment_configuration_type": {"key": "deploymentConfigurationType", "type": "str"},
+ }
+
+ _subtype_map = {
+ "deployment_configuration_type": {"PipelineComponent": "BatchPipelineComponentDeploymentConfiguration"}
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.deployment_configuration_type: Optional[str] = None
+
+
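# Sketch (editorial) of how the _subtype_map discriminator is consumed. The
# msrest-style Model.deserialize helper inherited from _serialization.Model is
# assumed to behave like its msrest counterpart: the deploymentConfigurationType
# value in a payload selects the concrete subclass.
from azure.mgmt.machinelearningservices import models

payload = {
    "deploymentConfigurationType": "PipelineComponent",
    "description": "nightly scoring pipeline",
}
config = models.BatchDeploymentConfiguration.deserialize(payload)
assert isinstance(config, models.BatchPipelineComponentDeploymentConfiguration)
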
class EndpointDeploymentPropertiesBase(_serialization.Model):
"""Base definition for endpoint deployment.
@@ -3112,8 +4024,8 @@ class EndpointDeploymentPropertiesBase(_serialization.Model):
:vartype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
:ivar description: Description of the endpoint deployment.
:vartype description: str
- :ivar environment_id: ARM resource ID or AssetId of the environment specification for the
- endpoint deployment.
+ :ivar environment_id: ARM resource ID of the environment specification for the endpoint
+ deployment.
:vartype environment_id: str
:ivar environment_variables: Environment variables configuration for the deployment.
:vartype environment_variables: dict[str, str]
@@ -3144,8 +4056,8 @@ def __init__(
:paramtype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
:keyword description: Description of the endpoint deployment.
:paramtype description: str
- :keyword environment_id: ARM resource ID or AssetId of the environment specification for the
- endpoint deployment.
+ :keyword environment_id: ARM resource ID of the environment specification for the endpoint
+ deployment.
:paramtype environment_id: str
:keyword environment_variables: Environment variables configuration for the deployment.
:paramtype environment_variables: dict[str, str]
@@ -3169,8 +4081,8 @@ class BatchDeploymentProperties(EndpointDeploymentPropertiesBase): # pylint: di
:vartype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
:ivar description: Description of the endpoint deployment.
:vartype description: str
- :ivar environment_id: ARM resource ID or AssetId of the environment specification for the
- endpoint deployment.
+ :ivar environment_id: ARM resource ID of the environment specification for the endpoint
+ deployment.
:vartype environment_id: str
:ivar environment_variables: Environment variables configuration for the deployment.
:vartype environment_variables: dict[str, str]
@@ -3178,6 +4090,9 @@ class BatchDeploymentProperties(EndpointDeploymentPropertiesBase): # pylint: di
:vartype properties: dict[str, str]
:ivar compute: Compute target for batch inference operation.
:vartype compute: str
+ :ivar deployment_configuration: Properties relevant to different deployment types.
+ :vartype deployment_configuration:
+ ~azure.mgmt.machinelearningservices.models.BatchDeploymentConfiguration
:ivar error_threshold: Error threshold, if the error count for the entire input goes above this
value,
the batch inference will be aborted. Range is [-1, int.MaxValue].
@@ -3224,6 +4139,7 @@ class BatchDeploymentProperties(EndpointDeploymentPropertiesBase): # pylint: di
"environment_variables": {"key": "environmentVariables", "type": "{str}"},
"properties": {"key": "properties", "type": "{str}"},
"compute": {"key": "compute", "type": "str"},
+ "deployment_configuration": {"key": "deploymentConfiguration", "type": "BatchDeploymentConfiguration"},
"error_threshold": {"key": "errorThreshold", "type": "int"},
"logging_level": {"key": "loggingLevel", "type": "str"},
"max_concurrency_per_instance": {"key": "maxConcurrencyPerInstance", "type": "int"},
@@ -3245,6 +4161,7 @@ def __init__(
environment_variables: Optional[Dict[str, str]] = None,
properties: Optional[Dict[str, str]] = None,
compute: Optional[str] = None,
+ deployment_configuration: Optional["_models.BatchDeploymentConfiguration"] = None,
error_threshold: int = -1,
logging_level: Optional[Union[str, "_models.BatchLoggingLevel"]] = None,
max_concurrency_per_instance: int = 1,
@@ -3261,8 +4178,8 @@ def __init__(
:paramtype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
:keyword description: Description of the endpoint deployment.
:paramtype description: str
- :keyword environment_id: ARM resource ID or AssetId of the environment specification for the
- endpoint deployment.
+ :keyword environment_id: ARM resource ID of the environment specification for the endpoint
+ deployment.
:paramtype environment_id: str
:keyword environment_variables: Environment variables configuration for the deployment.
:paramtype environment_variables: dict[str, str]
@@ -3270,6 +4187,9 @@ def __init__(
:paramtype properties: dict[str, str]
:keyword compute: Compute target for batch inference operation.
:paramtype compute: str
+ :keyword deployment_configuration: Properties relevant to different deployment types.
+ :paramtype deployment_configuration:
+ ~azure.mgmt.machinelearningservices.models.BatchDeploymentConfiguration
:keyword error_threshold: Error threshold, if the error count for the entire input goes above
this value,
the batch inference will be aborted. Range is [-1, int.MaxValue].
@@ -3310,6 +4230,7 @@ def __init__(
**kwargs
)
self.compute = compute
+ self.deployment_configuration = deployment_configuration
self.error_threshold = error_threshold
self.logging_level = logging_level
self.max_concurrency_per_instance = max_concurrency_per_instance
@@ -3644,6 +4565,64 @@ def __init__(
self.value = value
+class BatchPipelineComponentDeploymentConfiguration(BatchDeploymentConfiguration):
+ """Properties for a Batch Pipeline Component Deployment.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar deployment_configuration_type: [Required] The type of the deployment. Required. Known
+ values are: "Model" and "PipelineComponent".
+ :vartype deployment_configuration_type: str or
+ ~azure.mgmt.machinelearningservices.models.BatchDeploymentConfigurationType
+ :ivar component_id: The ARM id of the component to be run.
+ :vartype component_id: ~azure.mgmt.machinelearningservices.models.IdAssetReference
+ :ivar description: The description which will be applied to the job.
+ :vartype description: str
+ :ivar settings: Run-time settings for the pipeline job.
+ :vartype settings: dict[str, str]
+ :ivar tags: The tags which will be applied to the job.
+ :vartype tags: dict[str, str]
+ """
+
+ _validation = {
+ "deployment_configuration_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "deployment_configuration_type": {"key": "deploymentConfigurationType", "type": "str"},
+ "component_id": {"key": "componentId", "type": "IdAssetReference"},
+ "description": {"key": "description", "type": "str"},
+ "settings": {"key": "settings", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ }
+
+ def __init__(
+ self,
+ *,
+ component_id: Optional["_models.IdAssetReference"] = None,
+ description: Optional[str] = None,
+ settings: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword component_id: The ARM id of the component to be run.
+ :paramtype component_id: ~azure.mgmt.machinelearningservices.models.IdAssetReference
+ :keyword description: The description which will be applied to the job.
+ :paramtype description: str
+ :keyword settings: Run-time settings for the pipeline job.
+ :paramtype settings: dict[str, str]
+ :keyword tags: The tags which will be applied to the job.
+ :paramtype tags: dict[str, str]
+ """
+ super().__init__(**kwargs)
+ self.deployment_configuration_type: str = "PipelineComponent"
+ self.component_id = component_id
+ self.description = description
+ self.settings = settings
+ self.tags = tags
+
+
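# Sketch (editorial): wiring the new configuration type into a batch deployment.
# The component ARM id is a placeholder and IdAssetReference is assumed to keep its
# existing asset_id field.
from azure.mgmt.machinelearningservices import models

deployment_props = models.BatchDeploymentProperties(
    deployment_configuration=models.BatchPipelineComponentDeploymentConfiguration(
        component_id=models.IdAssetReference(asset_id="<component-arm-id>"),
        description="Batch scoring via a registered pipeline component",
        settings={"default_compute": "cpu-cluster"},
        tags={"team": "ml-platform"},
    ),
)
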
class BatchRetrySettings(_serialization.Model):
"""Retry settings for a batch inference operation.
@@ -3735,7 +4714,7 @@ def __init__(self, **kwargs: Any) -> None:
class BindOptions(_serialization.Model):
- """Describes the bind options for the container.
+ """BindOptions.
:ivar propagation: Type of Bind Option.
:vartype propagation: str
@@ -3872,1204 +4851,1385 @@ def __init__(self, *, context_uri: str, dockerfile_path: str = "Dockerfile", **k
self.dockerfile_path = dockerfile_path
-class CertificateDatastoreCredentials(DatastoreCredentials):
- """Certificate datastore credentials configuration.
+class CapacityReservationGroup(TrackedResource):
+ """A capacity reservation group resource.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
- :ivar credentials_type: [Required] Credential type used to authentication with storage.
- Required. Known values are: "AccountKey", "Certificate", "None", "Sas", and "ServicePrincipal".
- :vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
- :ivar authority_url: Authority URL used for authentication.
- :vartype authority_url: str
- :ivar client_id: [Required] Service principal client ID. Required.
- :vartype client_id: str
- :ivar resource_url: Resource the service principal has access to.
- :vartype resource_url: str
- :ivar secrets: [Required] Service principal secrets. Required.
- :vartype secrets: ~azure.mgmt.machinelearningservices.models.CertificateDatastoreSecrets
- :ivar tenant_id: [Required] ID of the tenant to which the service principal belongs. Required.
- :vartype tenant_id: str
- :ivar thumbprint: [Required] Thumbprint of the certificate used for authentication. Required.
- :vartype thumbprint: str
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar tags: Resource tags.
+ :vartype tags: dict[str, str]
+ :ivar location: The geo-location where the resource lives. Required.
+ :vartype location: str
+ :ivar identity: Managed service identity (system assigned and/or user assigned identities).
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :ivar kind: Metadata used by portal/tooling/etc to render different UX experiences for
+ resources of the same type.
+ :vartype kind: str
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties:
+ ~azure.mgmt.machinelearningservices.models.CapacityReservationGroupProperties
+ :ivar sku: Sku details required for ARM contract for Autoscaling.
+ :vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
_validation = {
- "credentials_type": {"required": True},
- "client_id": {"required": True},
- "secrets": {"required": True},
- "tenant_id": {"required": True},
- "thumbprint": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "location": {"required": True},
+ "properties": {"required": True},
}
_attribute_map = {
- "credentials_type": {"key": "credentialsType", "type": "str"},
- "authority_url": {"key": "authorityUrl", "type": "str"},
- "client_id": {"key": "clientId", "type": "str"},
- "resource_url": {"key": "resourceUrl", "type": "str"},
- "secrets": {"key": "secrets", "type": "CertificateDatastoreSecrets"},
- "tenant_id": {"key": "tenantId", "type": "str"},
- "thumbprint": {"key": "thumbprint", "type": "str"},
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "location": {"key": "location", "type": "str"},
+ "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
+ "kind": {"key": "kind", "type": "str"},
+ "properties": {"key": "properties", "type": "CapacityReservationGroupProperties"},
+ "sku": {"key": "sku", "type": "Sku"},
}
def __init__(
self,
*,
- client_id: str,
- secrets: "_models.CertificateDatastoreSecrets",
- tenant_id: str,
- thumbprint: str,
- authority_url: Optional[str] = None,
- resource_url: Optional[str] = None,
+ location: str,
+ properties: "_models.CapacityReservationGroupProperties",
+ tags: Optional[Dict[str, str]] = None,
+ identity: Optional["_models.ManagedServiceIdentity"] = None,
+ kind: Optional[str] = None,
+ sku: Optional["_models.Sku"] = None,
**kwargs: Any
) -> None:
"""
- :keyword authority_url: Authority URL used for authentication.
- :paramtype authority_url: str
- :keyword client_id: [Required] Service principal client ID. Required.
- :paramtype client_id: str
- :keyword resource_url: Resource the service principal has access to.
- :paramtype resource_url: str
- :keyword secrets: [Required] Service principal secrets. Required.
- :paramtype secrets: ~azure.mgmt.machinelearningservices.models.CertificateDatastoreSecrets
- :keyword tenant_id: [Required] ID of the tenant to which the service principal belongs.
- Required.
- :paramtype tenant_id: str
- :keyword thumbprint: [Required] Thumbprint of the certificate used for authentication.
- Required.
- :paramtype thumbprint: str
+ :keyword tags: Resource tags.
+ :paramtype tags: dict[str, str]
+ :keyword location: The geo-location where the resource lives. Required.
+ :paramtype location: str
+ :keyword identity: Managed service identity (system assigned and/or user assigned identities).
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :keyword kind: Metadata used by portal/tooling/etc to render different UX experiences for
+ resources of the same type.
+ :paramtype kind: str
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties:
+ ~azure.mgmt.machinelearningservices.models.CapacityReservationGroupProperties
+ :keyword sku: Sku details required for ARM contract for Autoscaling.
+ :paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
- super().__init__(**kwargs)
- self.credentials_type: str = "Certificate"
- self.authority_url = authority_url
- self.client_id = client_id
- self.resource_url = resource_url
- self.secrets = secrets
- self.tenant_id = tenant_id
- self.thumbprint = thumbprint
+ super().__init__(tags=tags, location=location, **kwargs)
+ self.identity = identity
+ self.kind = kind
+ self.properties = properties
+ self.sku = sku
-class CertificateDatastoreSecrets(DatastoreSecrets):
- """Datastore certificate secrets.
+class CapacityReservationGroupProperties(_serialization.Model):
+ """Properties of a capacity reservation group.
All required parameters must be populated in order to send to Azure.
- :ivar secrets_type: [Required] Credential type used to authentication with storage. Required.
- Known values are: "AccountKey", "Certificate", "Sas", and "ServicePrincipal".
- :vartype secrets_type: str or ~azure.mgmt.machinelearningservices.models.SecretsType
- :ivar certificate: Service principal certificate.
- :vartype certificate: str
+ :ivar offer: Offer used by this capacity reservation group.
+ :vartype offer: ~azure.mgmt.machinelearningservices.models.ServerlessOffer
+ :ivar reserved_capacity: [Required] Specifies the amount of capacity to reserve. Required.
+ :vartype reserved_capacity: int
"""
_validation = {
- "secrets_type": {"required": True},
+ "reserved_capacity": {"required": True},
}
_attribute_map = {
- "secrets_type": {"key": "secretsType", "type": "str"},
- "certificate": {"key": "certificate", "type": "str"},
+ "offer": {"key": "offer", "type": "ServerlessOffer"},
+ "reserved_capacity": {"key": "reservedCapacity", "type": "int"},
}
- def __init__(self, *, certificate: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(
+ self, *, reserved_capacity: int, offer: Optional["_models.ServerlessOffer"] = None, **kwargs: Any
+ ) -> None:
"""
- :keyword certificate: Service principal certificate.
- :paramtype certificate: str
+ :keyword offer: Offer used by this capacity reservation group.
+ :paramtype offer: ~azure.mgmt.machinelearningservices.models.ServerlessOffer
+ :keyword reserved_capacity: [Required] Specifies the amount of capacity to reserve. Required.
+ :paramtype reserved_capacity: int
"""
super().__init__(**kwargs)
- self.secrets_type: str = "Certificate"
- self.certificate = certificate
+ self.offer = offer
+ self.reserved_capacity = reserved_capacity
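
# Sketch (editorial) combining the two new capacity reservation models; the
# location and the reserved capacity figure are placeholders.
from azure.mgmt.machinelearningservices import models

group = models.CapacityReservationGroup(
    location="eastus",
    properties=models.CapacityReservationGroupProperties(reserved_capacity=100),
    tags={"env": "test"},
)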
-class TableVertical(_serialization.Model):
- """Abstract class for AutoML tasks that use table dataset as input - such as
- Classification/Regression/Forecasting.
+class CapacityReservationGroupTrackedResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of CapacityReservationGroup entities.
- :ivar cv_split_column_names: Columns to use for CVSplit data.
- :vartype cv_split_column_names: list[str]
- :ivar featurization_settings: Featurization inputs needed for AutoML job.
- :vartype featurization_settings:
- ~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
- :ivar limit_settings: Execution constraints for AutoMLJob.
- :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
- :ivar n_cross_validations: Number of cross validation folds to be applied on training dataset
- when validation dataset is not provided.
- :vartype n_cross_validations: ~azure.mgmt.machinelearningservices.models.NCrossValidations
- :ivar test_data: Test data input.
- :vartype test_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar test_data_size: The fraction of test dataset that needs to be set aside for validation
- purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :vartype test_data_size: float
- :ivar validation_data: Validation data inputs.
- :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :vartype validation_data_size: float
- :ivar weight_column_name: The name of the sample weight column. Automated ML supports a
- weighted column as an input, causing rows in the data to be weighted up or down.
- :vartype weight_column_name: str
+ :ivar next_link: The link to the next page of CapacityReservationGroup objects. If null, there
+ are no additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type CapacityReservationGroup.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.CapacityReservationGroup]
"""
_attribute_map = {
- "cv_split_column_names": {"key": "cvSplitColumnNames", "type": "[str]"},
- "featurization_settings": {"key": "featurizationSettings", "type": "TableVerticalFeaturizationSettings"},
- "limit_settings": {"key": "limitSettings", "type": "TableVerticalLimitSettings"},
- "n_cross_validations": {"key": "nCrossValidations", "type": "NCrossValidations"},
- "test_data": {"key": "testData", "type": "MLTableJobInput"},
- "test_data_size": {"key": "testDataSize", "type": "float"},
- "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
- "validation_data_size": {"key": "validationDataSize", "type": "float"},
- "weight_column_name": {"key": "weightColumnName", "type": "str"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[CapacityReservationGroup]"},
}
def __init__(
self,
*,
- cv_split_column_names: Optional[List[str]] = None,
- featurization_settings: Optional["_models.TableVerticalFeaturizationSettings"] = None,
- limit_settings: Optional["_models.TableVerticalLimitSettings"] = None,
- n_cross_validations: Optional["_models.NCrossValidations"] = None,
- test_data: Optional["_models.MLTableJobInput"] = None,
- test_data_size: Optional[float] = None,
- validation_data: Optional["_models.MLTableJobInput"] = None,
- validation_data_size: Optional[float] = None,
- weight_column_name: Optional[str] = None,
+ next_link: Optional[str] = None,
+ value: Optional[List["_models.CapacityReservationGroup"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword cv_split_column_names: Columns to use for CVSplit data.
- :paramtype cv_split_column_names: list[str]
- :keyword featurization_settings: Featurization inputs needed for AutoML job.
- :paramtype featurization_settings:
- ~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
- :keyword limit_settings: Execution constraints for AutoMLJob.
- :paramtype limit_settings:
- ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
- :keyword n_cross_validations: Number of cross validation folds to be applied on training
- dataset
- when validation dataset is not provided.
- :paramtype n_cross_validations: ~azure.mgmt.machinelearningservices.models.NCrossValidations
- :keyword test_data: Test data input.
- :paramtype test_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword test_data_size: The fraction of test dataset that needs to be set aside for validation
- purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :paramtype test_data_size: float
- :keyword validation_data: Validation data inputs.
- :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :paramtype validation_data_size: float
- :keyword weight_column_name: The name of the sample weight column. Automated ML supports a
- weighted column as an input, causing rows in the data to be weighted up or down.
- :paramtype weight_column_name: str
+ :keyword next_link: The link to the next page of CapacityReservationGroup objects. If null,
+ there are no additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type CapacityReservationGroup.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.CapacityReservationGroup]
"""
super().__init__(**kwargs)
- self.cv_split_column_names = cv_split_column_names
- self.featurization_settings = featurization_settings
- self.limit_settings = limit_settings
- self.n_cross_validations = n_cross_validations
- self.test_data = test_data
- self.test_data_size = test_data_size
- self.validation_data = validation_data
- self.validation_data_size = validation_data_size
- self.weight_column_name = weight_column_name
+ self.next_link = next_link
+ self.value = value
-class Classification(TableVertical, AutoMLVertical): # pylint: disable=too-many-instance-attributes
- """Classification task in AutoML Table vertical.
+class DataDriftMetricThresholdBase(_serialization.Model):
+ """Base class for data drift metric thresholds.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ CategoricalDataDriftMetricThreshold, NumericalDataDriftMetricThreshold
All required parameters must be populated in order to send to Azure.
- :ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
- "Warning", "Error", and "Critical".
- :vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
- :ivar target_column_name: Target column name: This is prediction values column.
- Also known as label column name in context of classification tasks.
- :vartype target_column_name: str
- :ivar task_type: [Required] Task type for AutoMLJob. Required. Known values are:
- "Classification", "Regression", "Forecasting", "ImageClassification",
- "ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
- "TextClassification", "TextClassificationMultilabel", and "TextNER".
- :vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
- :ivar training_data: [Required] Training data input. Required.
- :vartype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar cv_split_column_names: Columns to use for CVSplit data.
- :vartype cv_split_column_names: list[str]
- :ivar featurization_settings: Featurization inputs needed for AutoML job.
- :vartype featurization_settings:
- ~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
- :ivar limit_settings: Execution constraints for AutoMLJob.
- :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
- :ivar n_cross_validations: Number of cross validation folds to be applied on training dataset
- when validation dataset is not provided.
- :vartype n_cross_validations: ~azure.mgmt.machinelearningservices.models.NCrossValidations
- :ivar test_data: Test data input.
- :vartype test_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar test_data_size: The fraction of test dataset that needs to be set aside for validation
- purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :vartype test_data_size: float
- :ivar validation_data: Validation data inputs.
- :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :vartype validation_data_size: float
- :ivar weight_column_name: The name of the sample weight column. Automated ML supports a
- weighted column as an input, causing rows in the data to be weighted up or down.
- :vartype weight_column_name: str
- :ivar positive_label: Positive label for binary metrics calculation.
- :vartype positive_label: str
- :ivar primary_metric: Primary metric for the task. Known values are: "AUCWeighted", "Accuracy",
- "NormMacroRecall", "AveragePrecisionScoreWeighted", and "PrecisionScoreWeighted".
- :vartype primary_metric: str or
- ~azure.mgmt.machinelearningservices.models.ClassificationPrimaryMetrics
- :ivar training_settings: Inputs for training phase for an AutoML Job.
- :vartype training_settings:
- ~azure.mgmt.machinelearningservices.models.ClassificationTrainingSettings
+ :ivar data_type: [Required] Specifies the data type of the metric threshold. Required. Known
+ values are: "Numerical" and "Categorical".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType
+ :ivar threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
"""
_validation = {
- "task_type": {"required": True},
- "training_data": {"required": True},
+ "data_type": {"required": True},
}
_attribute_map = {
- "log_verbosity": {"key": "logVerbosity", "type": "str"},
- "target_column_name": {"key": "targetColumnName", "type": "str"},
- "task_type": {"key": "taskType", "type": "str"},
- "training_data": {"key": "trainingData", "type": "MLTableJobInput"},
- "cv_split_column_names": {"key": "cvSplitColumnNames", "type": "[str]"},
- "featurization_settings": {"key": "featurizationSettings", "type": "TableVerticalFeaturizationSettings"},
- "limit_settings": {"key": "limitSettings", "type": "TableVerticalLimitSettings"},
- "n_cross_validations": {"key": "nCrossValidations", "type": "NCrossValidations"},
- "test_data": {"key": "testData", "type": "MLTableJobInput"},
- "test_data_size": {"key": "testDataSize", "type": "float"},
- "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
- "validation_data_size": {"key": "validationDataSize", "type": "float"},
- "weight_column_name": {"key": "weightColumnName", "type": "str"},
- "positive_label": {"key": "positiveLabel", "type": "str"},
- "primary_metric": {"key": "primaryMetric", "type": "str"},
- "training_settings": {"key": "trainingSettings", "type": "ClassificationTrainingSettings"},
+ "data_type": {"key": "dataType", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
+ }
+
+ _subtype_map = {
+ "data_type": {
+ "Categorical": "CategoricalDataDriftMetricThreshold",
+ "Numerical": "NumericalDataDriftMetricThreshold",
+ }
+ }
+
+ def __init__(self, *, threshold: Optional["_models.MonitoringThreshold"] = None, **kwargs: Any) -> None:
+ """
+ :keyword threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ """
+ super().__init__(**kwargs)
+ self.data_type: Optional[str] = None
+ self.threshold = threshold
+
+
+class CategoricalDataDriftMetricThreshold(DataDriftMetricThresholdBase):
+ """CategoricalDataDriftMetricThreshold.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar data_type: [Required] Specifies the data type of the metric threshold. Required. Known
+ values are: "Numerical" and "Categorical".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType
+ :ivar threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :ivar metric: [Required] The categorical data drift metric to calculate. Required. Known values
+ are: "JensenShannonDistance", "PopulationStabilityIndex", and "PearsonsChiSquaredTest".
+ :vartype metric: str or ~azure.mgmt.machinelearningservices.models.CategoricalDataDriftMetric
+ """
+
+ _validation = {
+ "data_type": {"required": True},
+ "metric": {"required": True},
+ }
+
+ _attribute_map = {
+ "data_type": {"key": "dataType", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
+ "metric": {"key": "metric", "type": "str"},
}
def __init__(
self,
*,
- training_data: "_models.MLTableJobInput",
- log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
- target_column_name: Optional[str] = None,
- cv_split_column_names: Optional[List[str]] = None,
- featurization_settings: Optional["_models.TableVerticalFeaturizationSettings"] = None,
- limit_settings: Optional["_models.TableVerticalLimitSettings"] = None,
- n_cross_validations: Optional["_models.NCrossValidations"] = None,
- test_data: Optional["_models.MLTableJobInput"] = None,
- test_data_size: Optional[float] = None,
- validation_data: Optional["_models.MLTableJobInput"] = None,
- validation_data_size: Optional[float] = None,
- weight_column_name: Optional[str] = None,
- positive_label: Optional[str] = None,
- primary_metric: Optional[Union[str, "_models.ClassificationPrimaryMetrics"]] = None,
- training_settings: Optional["_models.ClassificationTrainingSettings"] = None,
+ metric: Union[str, "_models.CategoricalDataDriftMetric"],
+ threshold: Optional["_models.MonitoringThreshold"] = None,
**kwargs: Any
) -> None:
"""
- :keyword log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
- "Warning", "Error", and "Critical".
- :paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
- :keyword target_column_name: Target column name: This is prediction values column.
- Also known as label column name in context of classification tasks.
- :paramtype target_column_name: str
- :keyword training_data: [Required] Training data input. Required.
- :paramtype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword cv_split_column_names: Columns to use for CVSplit data.
- :paramtype cv_split_column_names: list[str]
- :keyword featurization_settings: Featurization inputs needed for AutoML job.
- :paramtype featurization_settings:
- ~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
- :keyword limit_settings: Execution constraints for AutoMLJob.
- :paramtype limit_settings:
- ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
- :keyword n_cross_validations: Number of cross validation folds to be applied on training
- dataset
- when validation dataset is not provided.
- :paramtype n_cross_validations: ~azure.mgmt.machinelearningservices.models.NCrossValidations
- :keyword test_data: Test data input.
- :paramtype test_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword test_data_size: The fraction of test dataset that needs to be set aside for validation
- purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :paramtype test_data_size: float
- :keyword validation_data: Validation data inputs.
- :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :paramtype validation_data_size: float
- :keyword weight_column_name: The name of the sample weight column. Automated ML supports a
- weighted column as an input, causing rows in the data to be weighted up or down.
- :paramtype weight_column_name: str
- :keyword positive_label: Positive label for binary metrics calculation.
- :paramtype positive_label: str
- :keyword primary_metric: Primary metric for the task. Known values are: "AUCWeighted",
- "Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted", and "PrecisionScoreWeighted".
- :paramtype primary_metric: str or
- ~azure.mgmt.machinelearningservices.models.ClassificationPrimaryMetrics
- :keyword training_settings: Inputs for training phase for an AutoML Job.
- :paramtype training_settings:
- ~azure.mgmt.machinelearningservices.models.ClassificationTrainingSettings
+ :keyword threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :keyword metric: [Required] The categorical data drift metric to calculate. Required. Known
+ values are: "JensenShannonDistance", "PopulationStabilityIndex", and "PearsonsChiSquaredTest".
+ :paramtype metric: str or ~azure.mgmt.machinelearningservices.models.CategoricalDataDriftMetric
"""
- super().__init__(
- cv_split_column_names=cv_split_column_names,
- featurization_settings=featurization_settings,
- limit_settings=limit_settings,
- n_cross_validations=n_cross_validations,
- test_data=test_data,
- test_data_size=test_data_size,
- validation_data=validation_data,
- validation_data_size=validation_data_size,
- weight_column_name=weight_column_name,
- log_verbosity=log_verbosity,
- target_column_name=target_column_name,
- training_data=training_data,
- **kwargs
- )
- self.log_verbosity = log_verbosity
- self.target_column_name = target_column_name
- self.task_type: str = "Classification"
- self.training_data = training_data
- self.positive_label = positive_label
- self.primary_metric = primary_metric
- self.training_settings = training_settings
- self.cv_split_column_names = cv_split_column_names
- self.featurization_settings = featurization_settings
- self.limit_settings = limit_settings
- self.n_cross_validations = n_cross_validations
- self.test_data = test_data
- self.test_data_size = test_data_size
- self.validation_data = validation_data
- self.validation_data_size = validation_data_size
- self.weight_column_name = weight_column_name
+ super().__init__(threshold=threshold, **kwargs)
+ self.data_type: str = "Categorical"
+ self.metric = metric
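
A quick usage sketch (not part of the generated diff): with the constructor above, a categorical data drift threshold only needs the metric name, and per the docstring the threshold object is optional, falling back to a metric-specific service default when omitted. MonitoringThreshold is assumed here to accept a numeric `value` keyword.

from azure.mgmt.machinelearningservices import models

# Hypothetical sketch: alert when the Jensen-Shannon distance exceeds 0.1.
# MonitoringThreshold(value=0.1) is an assumption about that model's constructor;
# omitting `threshold` lets the service pick a default for the chosen metric.
drift_threshold = models.CategoricalDataDriftMetricThreshold(
    metric="JensenShannonDistance",
    threshold=models.MonitoringThreshold(value=0.1),
)
print(drift_threshold.data_type)  # always "Categorical"; fixed by the subclass
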
-class TrainingSettings(_serialization.Model):
- """Training related configuration.
+class DataQualityMetricThresholdBase(_serialization.Model):
+ """DataQualityMetricThresholdBase.
- :ivar enable_dnn_training: Enable recommendation of DNN models.
- :vartype enable_dnn_training: bool
- :ivar enable_model_explainability: Flag to turn on explainability on best model.
- :vartype enable_model_explainability: bool
- :ivar enable_onnx_compatible_models: Flag for enabling onnx compatible models.
- :vartype enable_onnx_compatible_models: bool
- :ivar enable_stack_ensemble: Enable stack ensemble run.
- :vartype enable_stack_ensemble: bool
- :ivar enable_vote_ensemble: Enable voting ensemble run.
- :vartype enable_vote_ensemble: bool
- :ivar ensemble_model_download_timeout: During VotingEnsemble and StackEnsemble model
- generation, multiple fitted models from the previous child runs are downloaded.
- Configure this parameter with a higher value than 300 secs, if more time is needed.
- :vartype ensemble_model_download_timeout: ~datetime.timedelta
- :ivar stack_ensemble_settings: Stack ensemble settings for stack ensemble run.
- :vartype stack_ensemble_settings:
- ~azure.mgmt.machinelearningservices.models.StackEnsembleSettings
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ CategoricalDataQualityMetricThreshold, NumericalDataQualityMetricThreshold
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar data_type: [Required] Specifies the data type of the metric threshold. Required. Known
+ values are: "Numerical" and "Categorical".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType
+ :ivar threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
"""
+ _validation = {
+ "data_type": {"required": True},
+ }
+
_attribute_map = {
- "enable_dnn_training": {"key": "enableDnnTraining", "type": "bool"},
- "enable_model_explainability": {"key": "enableModelExplainability", "type": "bool"},
- "enable_onnx_compatible_models": {"key": "enableOnnxCompatibleModels", "type": "bool"},
- "enable_stack_ensemble": {"key": "enableStackEnsemble", "type": "bool"},
- "enable_vote_ensemble": {"key": "enableVoteEnsemble", "type": "bool"},
- "ensemble_model_download_timeout": {"key": "ensembleModelDownloadTimeout", "type": "duration"},
- "stack_ensemble_settings": {"key": "stackEnsembleSettings", "type": "StackEnsembleSettings"},
+ "data_type": {"key": "dataType", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
}
- def __init__(
- self,
- *,
- enable_dnn_training: bool = False,
- enable_model_explainability: bool = True,
- enable_onnx_compatible_models: bool = False,
- enable_stack_ensemble: bool = True,
- enable_vote_ensemble: bool = True,
- ensemble_model_download_timeout: datetime.timedelta = "PT5M",
- stack_ensemble_settings: Optional["_models.StackEnsembleSettings"] = None,
- **kwargs: Any
- ) -> None:
+ _subtype_map = {
+ "data_type": {
+ "Categorical": "CategoricalDataQualityMetricThreshold",
+ "Numerical": "NumericalDataQualityMetricThreshold",
+ }
+ }
+
+ def __init__(self, *, threshold: Optional["_models.MonitoringThreshold"] = None, **kwargs: Any) -> None:
"""
- :keyword enable_dnn_training: Enable recommendation of DNN models.
- :paramtype enable_dnn_training: bool
- :keyword enable_model_explainability: Flag to turn on explainability on best model.
- :paramtype enable_model_explainability: bool
- :keyword enable_onnx_compatible_models: Flag for enabling onnx compatible models.
- :paramtype enable_onnx_compatible_models: bool
- :keyword enable_stack_ensemble: Enable stack ensemble run.
- :paramtype enable_stack_ensemble: bool
- :keyword enable_vote_ensemble: Enable voting ensemble run.
- :paramtype enable_vote_ensemble: bool
- :keyword ensemble_model_download_timeout: During VotingEnsemble and StackEnsemble model
- generation, multiple fitted models from the previous child runs are downloaded.
- Configure this parameter with a higher value than 300 secs, if more time is needed.
- :paramtype ensemble_model_download_timeout: ~datetime.timedelta
- :keyword stack_ensemble_settings: Stack ensemble settings for stack ensemble run.
- :paramtype stack_ensemble_settings:
- ~azure.mgmt.machinelearningservices.models.StackEnsembleSettings
+ :keyword threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
"""
super().__init__(**kwargs)
- self.enable_dnn_training = enable_dnn_training
- self.enable_model_explainability = enable_model_explainability
- self.enable_onnx_compatible_models = enable_onnx_compatible_models
- self.enable_stack_ensemble = enable_stack_ensemble
- self.enable_vote_ensemble = enable_vote_ensemble
- self.ensemble_model_download_timeout = ensemble_model_download_timeout
- self.stack_ensemble_settings = stack_ensemble_settings
+ self.data_type: Optional[str] = None
+ self.threshold = threshold
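
The `_subtype_map` above is what makes this base model polymorphic: the wire-level `dataType` discriminator decides which subclass gets instantiated. A minimal sketch, assuming the shared `Model.deserialize` helper from the vendored serialization base:

from azure.mgmt.machinelearningservices import models

# The payload uses the REST (camelCase) keys from the _attribute_map; "Categorical"
# routes deserialization to CategoricalDataQualityMetricThreshold via _subtype_map.
payload = {"dataType": "Categorical", "metric": "NullValueRate"}
threshold = models.DataQualityMetricThresholdBase.deserialize(payload)
assert isinstance(threshold, models.CategoricalDataQualityMetricThreshold)
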
-class ClassificationTrainingSettings(TrainingSettings):
- """Classification Training related configuration.
+class CategoricalDataQualityMetricThreshold(DataQualityMetricThresholdBase):
+ """CategoricalDataQualityMetricThreshold.
- :ivar enable_dnn_training: Enable recommendation of DNN models.
- :vartype enable_dnn_training: bool
- :ivar enable_model_explainability: Flag to turn on explainability on best model.
- :vartype enable_model_explainability: bool
- :ivar enable_onnx_compatible_models: Flag for enabling onnx compatible models.
- :vartype enable_onnx_compatible_models: bool
- :ivar enable_stack_ensemble: Enable stack ensemble run.
- :vartype enable_stack_ensemble: bool
- :ivar enable_vote_ensemble: Enable voting ensemble run.
- :vartype enable_vote_ensemble: bool
- :ivar ensemble_model_download_timeout: During VotingEnsemble and StackEnsemble model
- generation, multiple fitted models from the previous child runs are downloaded.
- Configure this parameter with a higher value than 300 secs, if more time is needed.
- :vartype ensemble_model_download_timeout: ~datetime.timedelta
- :ivar stack_ensemble_settings: Stack ensemble settings for stack ensemble run.
- :vartype stack_ensemble_settings:
- ~azure.mgmt.machinelearningservices.models.StackEnsembleSettings
- :ivar allowed_training_algorithms: Allowed models for classification task.
- :vartype allowed_training_algorithms: list[str or
- ~azure.mgmt.machinelearningservices.models.ClassificationModels]
- :ivar blocked_training_algorithms: Blocked models for classification task.
- :vartype blocked_training_algorithms: list[str or
- ~azure.mgmt.machinelearningservices.models.ClassificationModels]
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar data_type: [Required] Specifies the data type of the metric threshold. Required. Known
+ values are: "Numerical" and "Categorical".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType
+ :ivar threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :ivar metric: [Required] The categorical data quality metric to calculate. Required. Known
+ values are: "NullValueRate", "DataTypeErrorRate", and "OutOfBoundsRate".
+ :vartype metric: str or ~azure.mgmt.machinelearningservices.models.CategoricalDataQualityMetric
"""
+ _validation = {
+ "data_type": {"required": True},
+ "metric": {"required": True},
+ }
+
_attribute_map = {
- "enable_dnn_training": {"key": "enableDnnTraining", "type": "bool"},
- "enable_model_explainability": {"key": "enableModelExplainability", "type": "bool"},
- "enable_onnx_compatible_models": {"key": "enableOnnxCompatibleModels", "type": "bool"},
- "enable_stack_ensemble": {"key": "enableStackEnsemble", "type": "bool"},
- "enable_vote_ensemble": {"key": "enableVoteEnsemble", "type": "bool"},
- "ensemble_model_download_timeout": {"key": "ensembleModelDownloadTimeout", "type": "duration"},
- "stack_ensemble_settings": {"key": "stackEnsembleSettings", "type": "StackEnsembleSettings"},
- "allowed_training_algorithms": {"key": "allowedTrainingAlgorithms", "type": "[str]"},
- "blocked_training_algorithms": {"key": "blockedTrainingAlgorithms", "type": "[str]"},
+ "data_type": {"key": "dataType", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
+ "metric": {"key": "metric", "type": "str"},
}
def __init__(
self,
*,
- enable_dnn_training: bool = False,
- enable_model_explainability: bool = True,
- enable_onnx_compatible_models: bool = False,
- enable_stack_ensemble: bool = True,
- enable_vote_ensemble: bool = True,
- ensemble_model_download_timeout: datetime.timedelta = "PT5M",
- stack_ensemble_settings: Optional["_models.StackEnsembleSettings"] = None,
- allowed_training_algorithms: Optional[List[Union[str, "_models.ClassificationModels"]]] = None,
- blocked_training_algorithms: Optional[List[Union[str, "_models.ClassificationModels"]]] = None,
+ metric: Union[str, "_models.CategoricalDataQualityMetric"],
+ threshold: Optional["_models.MonitoringThreshold"] = None,
**kwargs: Any
) -> None:
"""
- :keyword enable_dnn_training: Enable recommendation of DNN models.
- :paramtype enable_dnn_training: bool
- :keyword enable_model_explainability: Flag to turn on explainability on best model.
- :paramtype enable_model_explainability: bool
- :keyword enable_onnx_compatible_models: Flag for enabling onnx compatible models.
- :paramtype enable_onnx_compatible_models: bool
- :keyword enable_stack_ensemble: Enable stack ensemble run.
- :paramtype enable_stack_ensemble: bool
- :keyword enable_vote_ensemble: Enable voting ensemble run.
- :paramtype enable_vote_ensemble: bool
- :keyword ensemble_model_download_timeout: During VotingEnsemble and StackEnsemble model
- generation, multiple fitted models from the previous child runs are downloaded.
- Configure this parameter with a higher value than 300 secs, if more time is needed.
- :paramtype ensemble_model_download_timeout: ~datetime.timedelta
- :keyword stack_ensemble_settings: Stack ensemble settings for stack ensemble run.
- :paramtype stack_ensemble_settings:
- ~azure.mgmt.machinelearningservices.models.StackEnsembleSettings
- :keyword allowed_training_algorithms: Allowed models for classification task.
- :paramtype allowed_training_algorithms: list[str or
- ~azure.mgmt.machinelearningservices.models.ClassificationModels]
- :keyword blocked_training_algorithms: Blocked models for classification task.
- :paramtype blocked_training_algorithms: list[str or
- ~azure.mgmt.machinelearningservices.models.ClassificationModels]
+ :keyword threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :keyword metric: [Required] The categorical data quality metric to calculate. Required. Known
+ values are: "NullValueRate", "DataTypeErrorRate", and "OutOfBoundsRate".
+ :paramtype metric: str or
+ ~azure.mgmt.machinelearningservices.models.CategoricalDataQualityMetric
"""
- super().__init__(
- enable_dnn_training=enable_dnn_training,
- enable_model_explainability=enable_model_explainability,
- enable_onnx_compatible_models=enable_onnx_compatible_models,
- enable_stack_ensemble=enable_stack_ensemble,
- enable_vote_ensemble=enable_vote_ensemble,
- ensemble_model_download_timeout=ensemble_model_download_timeout,
- stack_ensemble_settings=stack_ensemble_settings,
- **kwargs
- )
- self.allowed_training_algorithms = allowed_training_algorithms
- self.blocked_training_algorithms = blocked_training_algorithms
+ super().__init__(threshold=threshold, **kwargs)
+ self.data_type: str = "Categorical"
+ self.metric = metric
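
As a sketch of the wire shape these attribute maps produce (the `serialize` helper comes from the shared serialization base and is an assumption here), serializing a categorical data quality threshold yields the camelCase keys plus the fixed discriminator:

from azure.mgmt.machinelearningservices import models

# Hypothetical values; MonitoringThreshold's `value` keyword is an assumption.
quality_threshold = models.CategoricalDataQualityMetricThreshold(
    metric="NullValueRate",
    threshold=models.MonitoringThreshold(value=0.05),
)
# Roughly: {'dataType': 'Categorical', 'metric': 'NullValueRate', 'threshold': {'value': 0.05}}
print(quality_threshold.serialize())
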
-class ClusterUpdateParameters(_serialization.Model):
- """AmlCompute update parameters.
+class PredictionDriftMetricThresholdBase(_serialization.Model):
+ """PredictionDriftMetricThresholdBase.
- :ivar properties: Properties of ClusterUpdate.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.ScaleSettingsInformation
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ CategoricalPredictionDriftMetricThreshold, NumericalPredictionDriftMetricThreshold
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar data_type: [Required] Specifies the data type of the metric threshold. Required. Known
+ values are: "Numerical" and "Categorical".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType
+ :ivar threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
"""
+ _validation = {
+ "data_type": {"required": True},
+ }
+
_attribute_map = {
- "properties": {"key": "properties.properties", "type": "ScaleSettingsInformation"},
+ "data_type": {"key": "dataType", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
}
- def __init__(self, *, properties: Optional["_models.ScaleSettingsInformation"] = None, **kwargs: Any) -> None:
+ _subtype_map = {
+ "data_type": {
+ "Categorical": "CategoricalPredictionDriftMetricThreshold",
+ "Numerical": "NumericalPredictionDriftMetricThreshold",
+ }
+ }
+
+ def __init__(self, *, threshold: Optional["_models.MonitoringThreshold"] = None, **kwargs: Any) -> None:
"""
- :keyword properties: Properties of ClusterUpdate.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.ScaleSettingsInformation
+ :keyword threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
"""
super().__init__(**kwargs)
- self.properties = properties
+ self.data_type: Optional[str] = None
+ self.threshold = threshold
-class CodeConfiguration(_serialization.Model):
- """Configuration for a scoring code asset.
+class CategoricalPredictionDriftMetricThreshold(PredictionDriftMetricThresholdBase):
+ """CategoricalPredictionDriftMetricThreshold.
All required parameters must be populated in order to send to Azure.
- :ivar code_id: ARM resource ID of the code asset.
- :vartype code_id: str
- :ivar scoring_script: [Required] The script to execute on startup. eg. "score.py". Required.
- :vartype scoring_script: str
+ :ivar data_type: [Required] Specifies the data type of the metric threshold. Required. Known
+ values are: "Numerical" and "Categorical".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType
+ :ivar threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :ivar metric: [Required] The categorical prediction drift metric to calculate. Required. Known
+ values are: "JensenShannonDistance", "PopulationStabilityIndex", and "PearsonsChiSquaredTest".
+ :vartype metric: str or
+ ~azure.mgmt.machinelearningservices.models.CategoricalPredictionDriftMetric
"""
_validation = {
- "scoring_script": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "data_type": {"required": True},
+ "metric": {"required": True},
}
_attribute_map = {
- "code_id": {"key": "codeId", "type": "str"},
- "scoring_script": {"key": "scoringScript", "type": "str"},
+ "data_type": {"key": "dataType", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
+ "metric": {"key": "metric", "type": "str"},
}
- def __init__(self, *, scoring_script: str, code_id: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ metric: Union[str, "_models.CategoricalPredictionDriftMetric"],
+ threshold: Optional["_models.MonitoringThreshold"] = None,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword code_id: ARM resource ID of the code asset.
- :paramtype code_id: str
- :keyword scoring_script: [Required] The script to execute on startup. eg. "score.py". Required.
- :paramtype scoring_script: str
+ :keyword threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :keyword metric: [Required] The categorical prediction drift metric to calculate. Required.
+ Known values are: "JensenShannonDistance", "PopulationStabilityIndex", and
+ "PearsonsChiSquaredTest".
+ :paramtype metric: str or
+ ~azure.mgmt.machinelearningservices.models.CategoricalPredictionDriftMetric
"""
- super().__init__(**kwargs)
- self.code_id = code_id
- self.scoring_script = scoring_script
+ super().__init__(threshold=threshold, **kwargs)
+ self.data_type: str = "Categorical"
+ self.metric = metric
-class CodeContainer(Resource):
- """Azure Resource Manager resource envelope.
-
- Variables are only populated by the server, and will be ignored when sending a request.
+class CertificateDatastoreCredentials(DatastoreCredentials):
+ """Certificate datastore credentials configuration.
All required parameters must be populated in order to send to Azure.
- :ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
- :vartype id: str
- :ivar name: The name of the resource.
- :vartype name: str
- :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
- "Microsoft.Storage/storageAccounts".
- :vartype type: str
- :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
- information.
- :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
- :ivar properties: [Required] Additional attributes of the entity. Required.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.CodeContainerProperties
+ :ivar credentials_type: [Required] Credential type used to authenticate with storage.
+ Required. Known values are: "AccountKey", "Certificate", "None", "Sas", "ServicePrincipal",
+ "KerberosKeytab", and "KerberosPassword".
+ :vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
+ :ivar authority_url: Authority URL used for authentication.
+ :vartype authority_url: str
+ :ivar client_id: [Required] Service principal client ID. Required.
+ :vartype client_id: str
+ :ivar resource_url: Resource the service principal has access to.
+ :vartype resource_url: str
+ :ivar secrets: [Required] Service principal secrets. Required.
+ :vartype secrets: ~azure.mgmt.machinelearningservices.models.CertificateDatastoreSecrets
+ :ivar tenant_id: [Required] ID of the tenant to which the service principal belongs. Required.
+ :vartype tenant_id: str
+ :ivar thumbprint: [Required] Thumbprint of the certificate used for authentication. Required.
+ :vartype thumbprint: str
"""
_validation = {
- "id": {"readonly": True},
- "name": {"readonly": True},
- "type": {"readonly": True},
- "system_data": {"readonly": True},
- "properties": {"required": True},
+ "credentials_type": {"required": True},
+ "client_id": {"required": True},
+ "secrets": {"required": True},
+ "tenant_id": {"required": True},
+ "thumbprint": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
}
_attribute_map = {
- "id": {"key": "id", "type": "str"},
- "name": {"key": "name", "type": "str"},
- "type": {"key": "type", "type": "str"},
- "system_data": {"key": "systemData", "type": "SystemData"},
- "properties": {"key": "properties", "type": "CodeContainerProperties"},
+ "credentials_type": {"key": "credentialsType", "type": "str"},
+ "authority_url": {"key": "authorityUrl", "type": "str"},
+ "client_id": {"key": "clientId", "type": "str"},
+ "resource_url": {"key": "resourceUrl", "type": "str"},
+ "secrets": {"key": "secrets", "type": "CertificateDatastoreSecrets"},
+ "tenant_id": {"key": "tenantId", "type": "str"},
+ "thumbprint": {"key": "thumbprint", "type": "str"},
}
- def __init__(self, *, properties: "_models.CodeContainerProperties", **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ client_id: str,
+ secrets: "_models.CertificateDatastoreSecrets",
+ tenant_id: str,
+ thumbprint: str,
+ authority_url: Optional[str] = None,
+ resource_url: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword properties: [Required] Additional attributes of the entity. Required.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.CodeContainerProperties
+ :keyword authority_url: Authority URL used for authentication.
+ :paramtype authority_url: str
+ :keyword client_id: [Required] Service principal client ID. Required.
+ :paramtype client_id: str
+ :keyword resource_url: Resource the service principal has access to.
+ :paramtype resource_url: str
+ :keyword secrets: [Required] Service principal secrets. Required.
+ :paramtype secrets: ~azure.mgmt.machinelearningservices.models.CertificateDatastoreSecrets
+ :keyword tenant_id: [Required] ID of the tenant to which the service principal belongs.
+ Required.
+ :paramtype tenant_id: str
+ :keyword thumbprint: [Required] Thumbprint of the certificate used for authentication.
+ Required.
+ :paramtype thumbprint: str
"""
super().__init__(**kwargs)
- self.properties = properties
+ self.credentials_type: str = "Certificate"
+ self.authority_url = authority_url
+ self.client_id = client_id
+ self.resource_url = resource_url
+ self.secrets = secrets
+ self.tenant_id = tenant_id
+ self.thumbprint = thumbprint
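
A construction sketch for the certificate-based datastore credentials above; all identifiers are placeholders, and the certificate content itself travels separately in CertificateDatastoreSecrets:

from azure.mgmt.machinelearningservices import models

# Placeholder IDs and thumbprint; the secrets object carries the certificate payload
# (see the CertificateDatastoreSecrets model defined next in this diff).
credentials = models.CertificateDatastoreCredentials(
    client_id="00000000-0000-0000-0000-000000000000",
    tenant_id="00000000-0000-0000-0000-000000000000",
    thumbprint="0123456789ABCDEF0123456789ABCDEF01234567",
    secrets=models.CertificateDatastoreSecrets(certificate="<certificate-content>"),
)
# credentials.credentials_type is fixed to "Certificate" by the subclass.
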
-class CodeContainerProperties(AssetContainer):
- """Container for code asset versions.
+class CertificateDatastoreSecrets(DatastoreSecrets):
+ """Datastore certificate secrets.
- Variables are only populated by the server, and will be ignored when sending a request.
+ All required parameters must be populated in order to send to Azure.
- :ivar description: The asset description text.
- :vartype description: str
- :ivar properties: The asset property dictionary.
- :vartype properties: dict[str, str]
- :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
- :vartype tags: dict[str, str]
- :ivar is_archived: Is the asset archived?.
- :vartype is_archived: bool
- :ivar latest_version: The latest version inside this container.
- :vartype latest_version: str
- :ivar next_version: The next auto incremental version.
- :vartype next_version: str
- :ivar provisioning_state: Provisioning state for the code container. Known values are:
- "Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.AssetProvisioningState
+ :ivar secrets_type: [Required] Credential type used to authenticate with storage. Required.
+ Known values are: "AccountKey", "Certificate", "Sas", "ServicePrincipal", "KerberosPassword",
+ and "KerberosKeytab".
+ :vartype secrets_type: str or ~azure.mgmt.machinelearningservices.models.SecretsType
+ :ivar certificate: Service principal certificate.
+ :vartype certificate: str
"""
_validation = {
- "latest_version": {"readonly": True},
- "next_version": {"readonly": True},
- "provisioning_state": {"readonly": True},
+ "secrets_type": {"required": True},
}
_attribute_map = {
- "description": {"key": "description", "type": "str"},
- "properties": {"key": "properties", "type": "{str}"},
- "tags": {"key": "tags", "type": "{str}"},
- "is_archived": {"key": "isArchived", "type": "bool"},
- "latest_version": {"key": "latestVersion", "type": "str"},
- "next_version": {"key": "nextVersion", "type": "str"},
- "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "secrets_type": {"key": "secretsType", "type": "str"},
+ "certificate": {"key": "certificate", "type": "str"},
}
- def __init__(
- self,
- *,
- description: Optional[str] = None,
- properties: Optional[Dict[str, str]] = None,
- tags: Optional[Dict[str, str]] = None,
- is_archived: bool = False,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, certificate: Optional[str] = None, **kwargs: Any) -> None:
"""
- :keyword description: The asset description text.
- :paramtype description: str
- :keyword properties: The asset property dictionary.
- :paramtype properties: dict[str, str]
- :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
- :paramtype tags: dict[str, str]
- :keyword is_archived: Is the asset archived?.
- :paramtype is_archived: bool
+ :keyword certificate: Service principal certificate.
+ :paramtype certificate: str
"""
- super().__init__(description=description, properties=properties, tags=tags, is_archived=is_archived, **kwargs)
- self.provisioning_state = None
+ super().__init__(**kwargs)
+ self.secrets_type: str = "Certificate"
+ self.certificate = certificate
-class CodeContainerResourceArmPaginatedResult(_serialization.Model):
- """A paginated list of CodeContainer entities.
+class TableVertical(_serialization.Model): # pylint: disable=too-many-instance-attributes
+ """Abstract class for AutoML tasks that use table dataset as input - such as
+ Classification/Regression/Forecasting.
- :ivar next_link: The link to the next page of CodeContainer objects. If null, there are no
- additional pages.
- :vartype next_link: str
- :ivar value: An array of objects of type CodeContainer.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.CodeContainer]
+ :ivar cv_split_column_names: Columns to use for CVSplit data.
+ :vartype cv_split_column_names: list[str]
+ :ivar featurization_settings: Featurization inputs needed for AutoML job.
+ :vartype featurization_settings:
+ ~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
+ :ivar fixed_parameters: Model/training parameters that will remain constant throughout
+ training.
+ :vartype fixed_parameters: ~azure.mgmt.machinelearningservices.models.TableFixedParameters
+ :ivar limit_settings: Execution constraints for AutoMLJob.
+ :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
+ :ivar n_cross_validations: Number of cross validation folds to be applied on training dataset
+ when validation dataset is not provided.
+ :vartype n_cross_validations: ~azure.mgmt.machinelearningservices.models.NCrossValidations
+ :ivar search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :vartype search_space: list[~azure.mgmt.machinelearningservices.models.TableParameterSubspace]
+ :ivar sweep_settings: Settings for model sweeping and hyperparameter tuning.
+ :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.TableSweepSettings
+ :ivar test_data: Test data input.
+ :vartype test_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar test_data_size: The fraction of the test dataset that needs to be set aside for
+ validation purposes.
+ Values between (0.0, 1.0).
+ Applied when a validation dataset is not provided.
+ :vartype test_data_size: float
+ :ivar validation_data: Validation data inputs.
+ :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar validation_data_size: The fraction of the training dataset that needs to be set aside
+ for validation purposes.
+ Values between (0.0, 1.0).
+ Applied when a validation dataset is not provided.
+ :vartype validation_data_size: float
+ :ivar weight_column_name: The name of the sample weight column. Automated ML supports a
+ weighted column as an input, causing rows in the data to be weighted up or down.
+ :vartype weight_column_name: str
"""
_attribute_map = {
- "next_link": {"key": "nextLink", "type": "str"},
- "value": {"key": "value", "type": "[CodeContainer]"},
+ "cv_split_column_names": {"key": "cvSplitColumnNames", "type": "[str]"},
+ "featurization_settings": {"key": "featurizationSettings", "type": "TableVerticalFeaturizationSettings"},
+ "fixed_parameters": {"key": "fixedParameters", "type": "TableFixedParameters"},
+ "limit_settings": {"key": "limitSettings", "type": "TableVerticalLimitSettings"},
+ "n_cross_validations": {"key": "nCrossValidations", "type": "NCrossValidations"},
+ "search_space": {"key": "searchSpace", "type": "[TableParameterSubspace]"},
+ "sweep_settings": {"key": "sweepSettings", "type": "TableSweepSettings"},
+ "test_data": {"key": "testData", "type": "MLTableJobInput"},
+ "test_data_size": {"key": "testDataSize", "type": "float"},
+ "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
+ "validation_data_size": {"key": "validationDataSize", "type": "float"},
+ "weight_column_name": {"key": "weightColumnName", "type": "str"},
}
def __init__(
- self, *, next_link: Optional[str] = None, value: Optional[List["_models.CodeContainer"]] = None, **kwargs: Any
+ self,
+ *,
+ cv_split_column_names: Optional[List[str]] = None,
+ featurization_settings: Optional["_models.TableVerticalFeaturizationSettings"] = None,
+ fixed_parameters: Optional["_models.TableFixedParameters"] = None,
+ limit_settings: Optional["_models.TableVerticalLimitSettings"] = None,
+ n_cross_validations: Optional["_models.NCrossValidations"] = None,
+ search_space: Optional[List["_models.TableParameterSubspace"]] = None,
+ sweep_settings: Optional["_models.TableSweepSettings"] = None,
+ test_data: Optional["_models.MLTableJobInput"] = None,
+ test_data_size: Optional[float] = None,
+ validation_data: Optional["_models.MLTableJobInput"] = None,
+ validation_data_size: Optional[float] = None,
+ weight_column_name: Optional[str] = None,
+ **kwargs: Any
) -> None:
"""
- :keyword next_link: The link to the next page of CodeContainer objects. If null, there are no
- additional pages.
- :paramtype next_link: str
- :keyword value: An array of objects of type CodeContainer.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.CodeContainer]
- """
- super().__init__(**kwargs)
- self.next_link = next_link
- self.value = value
-
-
-class CodeVersion(Resource):
- """Azure Resource Manager resource envelope.
-
- Variables are only populated by the server, and will be ignored when sending a request.
-
- All required parameters must be populated in order to send to Azure.
-
- :ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
- :vartype id: str
- :ivar name: The name of the resource.
- :vartype name: str
- :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
- "Microsoft.Storage/storageAccounts".
- :vartype type: str
- :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
- information.
- :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
- :ivar properties: [Required] Additional attributes of the entity. Required.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.CodeVersionProperties
- """
-
- _validation = {
- "id": {"readonly": True},
- "name": {"readonly": True},
- "type": {"readonly": True},
- "system_data": {"readonly": True},
- "properties": {"required": True},
- }
-
- _attribute_map = {
- "id": {"key": "id", "type": "str"},
- "name": {"key": "name", "type": "str"},
- "type": {"key": "type", "type": "str"},
- "system_data": {"key": "systemData", "type": "SystemData"},
- "properties": {"key": "properties", "type": "CodeVersionProperties"},
- }
-
- def __init__(self, *, properties: "_models.CodeVersionProperties", **kwargs: Any) -> None:
- """
- :keyword properties: [Required] Additional attributes of the entity. Required.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.CodeVersionProperties
+ :keyword cv_split_column_names: Columns to use for CVSplit data.
+ :paramtype cv_split_column_names: list[str]
+ :keyword featurization_settings: Featurization inputs needed for AutoML job.
+ :paramtype featurization_settings:
+ ~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
+ :keyword fixed_parameters: Model/training parameters that will remain constant throughout
+ training.
+ :paramtype fixed_parameters: ~azure.mgmt.machinelearningservices.models.TableFixedParameters
+ :keyword limit_settings: Execution constraints for AutoMLJob.
+ :paramtype limit_settings:
+ ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
+ :keyword n_cross_validations: Number of cross validation folds to be applied on training
+ dataset
+ when validation dataset is not provided.
+ :paramtype n_cross_validations: ~azure.mgmt.machinelearningservices.models.NCrossValidations
+ :keyword search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :paramtype search_space:
+ list[~azure.mgmt.machinelearningservices.models.TableParameterSubspace]
+ :keyword sweep_settings: Settings for model sweeping and hyperparameter tuning.
+ :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.TableSweepSettings
+ :keyword test_data: Test data input.
+ :paramtype test_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword test_data_size: The fraction of the test dataset that needs to be set aside for
+ validation purposes.
+ Values between (0.0, 1.0).
+ Applied when a validation dataset is not provided.
+ :paramtype test_data_size: float
+ :keyword validation_data: Validation data inputs.
+ :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword validation_data_size: The fraction of the training dataset that needs to be set
+ aside for validation purposes.
+ Values between (0.0, 1.0).
+ Applied when a validation dataset is not provided.
+ :paramtype validation_data_size: float
+ :keyword weight_column_name: The name of the sample weight column. Automated ML supports a
+ weighted column as an input, causing rows in the data to be weighted up or down.
+ :paramtype weight_column_name: str
"""
super().__init__(**kwargs)
- self.properties = properties
+ self.cv_split_column_names = cv_split_column_names
+ self.featurization_settings = featurization_settings
+ self.fixed_parameters = fixed_parameters
+ self.limit_settings = limit_settings
+ self.n_cross_validations = n_cross_validations
+ self.search_space = search_space
+ self.sweep_settings = sweep_settings
+ self.test_data = test_data
+ self.test_data_size = test_data_size
+ self.validation_data = validation_data
+ self.validation_data_size = validation_data_size
+ self.weight_column_name = weight_column_name
-class CodeVersionProperties(AssetBase):
- """Code asset version details.
+class Classification(TableVertical, AutoMLVertical): # pylint: disable=too-many-instance-attributes
+ """Classification task in AutoML Table vertical.
- Variables are only populated by the server, and will be ignored when sending a request.
+ All required parameters must be populated in order to send to Azure.
- :ivar description: The asset description text.
- :vartype description: str
- :ivar properties: The asset property dictionary.
- :vartype properties: dict[str, str]
- :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
- :vartype tags: dict[str, str]
- :ivar is_anonymous: If the name version are system generated (anonymous registration).
- :vartype is_anonymous: bool
- :ivar is_archived: Is the asset archived?.
- :vartype is_archived: bool
- :ivar code_uri: Uri where code is located.
- :vartype code_uri: str
- :ivar provisioning_state: Provisioning state for the code version. Known values are:
- "Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.AssetProvisioningState
+ :ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
+ "Warning", "Error", and "Critical".
+ :vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
+ :ivar target_column_name: Target column name: This is the prediction values column.
+ Also known as the label column name in the context of classification tasks.
+ :vartype target_column_name: str
+ :ivar task_type: [Required] Task type for AutoMLJob. Required. Known values are:
+ "Classification", "Regression", "Forecasting", "ImageClassification",
+ "ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
+ "TextClassification", "TextClassificationMultilabel", and "TextNER".
+ :vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
+ :ivar training_data: [Required] Training data input. Required.
+ :vartype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar cv_split_column_names: Columns to use for CVSplit data.
+ :vartype cv_split_column_names: list[str]
+ :ivar featurization_settings: Featurization inputs needed for AutoML job.
+ :vartype featurization_settings:
+ ~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
+ :ivar fixed_parameters: Model/training parameters that will remain constant throughout
+ training.
+ :vartype fixed_parameters: ~azure.mgmt.machinelearningservices.models.TableFixedParameters
+ :ivar limit_settings: Execution constraints for AutoMLJob.
+ :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
+ :ivar n_cross_validations: Number of cross validation folds to be applied on training dataset
+ when validation dataset is not provided.
+ :vartype n_cross_validations: ~azure.mgmt.machinelearningservices.models.NCrossValidations
+ :ivar search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :vartype search_space: list[~azure.mgmt.machinelearningservices.models.TableParameterSubspace]
+ :ivar sweep_settings: Settings for model sweeping and hyperparameter tuning.
+ :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.TableSweepSettings
+ :ivar test_data: Test data input.
+ :vartype test_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar test_data_size: The fraction of the test dataset that needs to be set aside for
+ validation purposes.
+ Values between (0.0, 1.0).
+ Applied when a validation dataset is not provided.
+ :vartype test_data_size: float
+ :ivar validation_data: Validation data inputs.
+ :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar validation_data_size: The fraction of the training dataset that needs to be set aside
+ for validation purposes.
+ Values between (0.0, 1.0).
+ Applied when a validation dataset is not provided.
+ :vartype validation_data_size: float
+ :ivar weight_column_name: The name of the sample weight column. Automated ML supports a
+ weighted column as an input, causing rows in the data to be weighted up or down.
+ :vartype weight_column_name: str
+ :ivar positive_label: Positive label for binary metrics calculation.
+ :vartype positive_label: str
+ :ivar primary_metric: Primary metric for the task. Known values are: "AUCWeighted", "Accuracy",
+ "NormMacroRecall", "AveragePrecisionScoreWeighted", and "PrecisionScoreWeighted".
+ :vartype primary_metric: str or
+ ~azure.mgmt.machinelearningservices.models.ClassificationPrimaryMetrics
+ :ivar training_settings: Inputs for training phase for an AutoML Job.
+ :vartype training_settings:
+ ~azure.mgmt.machinelearningservices.models.ClassificationTrainingSettings
"""
_validation = {
- "provisioning_state": {"readonly": True},
+ "task_type": {"required": True},
+ "training_data": {"required": True},
}
_attribute_map = {
- "description": {"key": "description", "type": "str"},
- "properties": {"key": "properties", "type": "{str}"},
- "tags": {"key": "tags", "type": "{str}"},
- "is_anonymous": {"key": "isAnonymous", "type": "bool"},
- "is_archived": {"key": "isArchived", "type": "bool"},
- "code_uri": {"key": "codeUri", "type": "str"},
- "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "log_verbosity": {"key": "logVerbosity", "type": "str"},
+ "target_column_name": {"key": "targetColumnName", "type": "str"},
+ "task_type": {"key": "taskType", "type": "str"},
+ "training_data": {"key": "trainingData", "type": "MLTableJobInput"},
+ "cv_split_column_names": {"key": "cvSplitColumnNames", "type": "[str]"},
+ "featurization_settings": {"key": "featurizationSettings", "type": "TableVerticalFeaturizationSettings"},
+ "fixed_parameters": {"key": "fixedParameters", "type": "TableFixedParameters"},
+ "limit_settings": {"key": "limitSettings", "type": "TableVerticalLimitSettings"},
+ "n_cross_validations": {"key": "nCrossValidations", "type": "NCrossValidations"},
+ "search_space": {"key": "searchSpace", "type": "[TableParameterSubspace]"},
+ "sweep_settings": {"key": "sweepSettings", "type": "TableSweepSettings"},
+ "test_data": {"key": "testData", "type": "MLTableJobInput"},
+ "test_data_size": {"key": "testDataSize", "type": "float"},
+ "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
+ "validation_data_size": {"key": "validationDataSize", "type": "float"},
+ "weight_column_name": {"key": "weightColumnName", "type": "str"},
+ "positive_label": {"key": "positiveLabel", "type": "str"},
+ "primary_metric": {"key": "primaryMetric", "type": "str"},
+ "training_settings": {"key": "trainingSettings", "type": "ClassificationTrainingSettings"},
}
def __init__(
self,
*,
- description: Optional[str] = None,
- properties: Optional[Dict[str, str]] = None,
- tags: Optional[Dict[str, str]] = None,
- is_anonymous: bool = False,
- is_archived: bool = False,
- code_uri: Optional[str] = None,
+ training_data: "_models.MLTableJobInput",
+ log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
+ target_column_name: Optional[str] = None,
+ cv_split_column_names: Optional[List[str]] = None,
+ featurization_settings: Optional["_models.TableVerticalFeaturizationSettings"] = None,
+ fixed_parameters: Optional["_models.TableFixedParameters"] = None,
+ limit_settings: Optional["_models.TableVerticalLimitSettings"] = None,
+ n_cross_validations: Optional["_models.NCrossValidations"] = None,
+ search_space: Optional[List["_models.TableParameterSubspace"]] = None,
+ sweep_settings: Optional["_models.TableSweepSettings"] = None,
+ test_data: Optional["_models.MLTableJobInput"] = None,
+ test_data_size: Optional[float] = None,
+ validation_data: Optional["_models.MLTableJobInput"] = None,
+ validation_data_size: Optional[float] = None,
+ weight_column_name: Optional[str] = None,
+ positive_label: Optional[str] = None,
+ primary_metric: Optional[Union[str, "_models.ClassificationPrimaryMetrics"]] = None,
+ training_settings: Optional["_models.ClassificationTrainingSettings"] = None,
**kwargs: Any
) -> None:
"""
- :keyword description: The asset description text.
- :paramtype description: str
- :keyword properties: The asset property dictionary.
- :paramtype properties: dict[str, str]
- :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
- :paramtype tags: dict[str, str]
- :keyword is_anonymous: If the name version are system generated (anonymous registration).
- :paramtype is_anonymous: bool
- :keyword is_archived: Is the asset archived?.
- :paramtype is_archived: bool
- :keyword code_uri: Uri where code is located.
- :paramtype code_uri: str
- """
- super().__init__(
- description=description,
- properties=properties,
- tags=tags,
- is_anonymous=is_anonymous,
- is_archived=is_archived,
- **kwargs
- )
- self.code_uri = code_uri
- self.provisioning_state = None
-
-
-class CodeVersionResourceArmPaginatedResult(_serialization.Model):
- """A paginated list of CodeVersion entities.
-
- :ivar next_link: The link to the next page of CodeVersion objects. If null, there are no
- additional pages.
- :vartype next_link: str
- :ivar value: An array of objects of type CodeVersion.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.CodeVersion]
- """
-
- _attribute_map = {
- "next_link": {"key": "nextLink", "type": "str"},
- "value": {"key": "value", "type": "[CodeVersion]"},
- }
-
- def __init__(
- self, *, next_link: Optional[str] = None, value: Optional[List["_models.CodeVersion"]] = None, **kwargs: Any
- ) -> None:
- """
- :keyword next_link: The link to the next page of CodeVersion objects. If null, there are no
- additional pages.
- :paramtype next_link: str
- :keyword value: An array of objects of type CodeVersion.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.CodeVersion]
- """
- super().__init__(**kwargs)
- self.next_link = next_link
- self.value = value
-
-
-class ColumnTransformer(_serialization.Model):
- """Column transformer parameters.
+ :keyword log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
+ "Warning", "Error", and "Critical".
+ :paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
+ :keyword target_column_name: Target column name: This is the prediction values column.
+ Also known as the label column name in the context of classification tasks.
+ :paramtype target_column_name: str
+ :keyword training_data: [Required] Training data input. Required.
+ :paramtype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword cv_split_column_names: Columns to use for CVSplit data.
+ :paramtype cv_split_column_names: list[str]
+ :keyword featurization_settings: Featurization inputs needed for AutoML job.
+ :paramtype featurization_settings:
+ ~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
+ :keyword fixed_parameters: Model/training parameters that will remain constant throughout
+ training.
+ :paramtype fixed_parameters: ~azure.mgmt.machinelearningservices.models.TableFixedParameters
+ :keyword limit_settings: Execution constraints for AutoMLJob.
+ :paramtype limit_settings:
+ ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
+ :keyword n_cross_validations: Number of cross validation folds to be applied on training
+ dataset
+ when validation dataset is not provided.
+ :paramtype n_cross_validations: ~azure.mgmt.machinelearningservices.models.NCrossValidations
+ :keyword search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :paramtype search_space:
+ list[~azure.mgmt.machinelearningservices.models.TableParameterSubspace]
+ :keyword sweep_settings: Settings for model sweeping and hyperparameter tuning.
+ :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.TableSweepSettings
+ :keyword test_data: Test data input.
+ :paramtype test_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword test_data_size: The fraction of the test dataset that needs to be set aside for
+ validation purposes.
+ Values between (0.0, 1.0).
+ Applied when a validation dataset is not provided.
+ :paramtype test_data_size: float
+ :keyword validation_data: Validation data inputs.
+ :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword validation_data_size: The fraction of the training dataset that needs to be set
+ aside for validation purposes.
+ Values between (0.0, 1.0).
+ Applied when a validation dataset is not provided.
+ :paramtype validation_data_size: float
+ :keyword weight_column_name: The name of the sample weight column. Automated ML supports a
+ weighted column as an input, causing rows in the data to be weighted up or down.
+ :paramtype weight_column_name: str
+ :keyword positive_label: Positive label for binary metrics calculation.
+ :paramtype positive_label: str
+ :keyword primary_metric: Primary metric for the task. Known values are: "AUCWeighted",
+ "Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted", and "PrecisionScoreWeighted".
+ :paramtype primary_metric: str or
+ ~azure.mgmt.machinelearningservices.models.ClassificationPrimaryMetrics
+ :keyword training_settings: Inputs for training phase for an AutoML Job.
+ :paramtype training_settings:
+ ~azure.mgmt.machinelearningservices.models.ClassificationTrainingSettings
+ """
+ super().__init__(
+ cv_split_column_names=cv_split_column_names,
+ featurization_settings=featurization_settings,
+ fixed_parameters=fixed_parameters,
+ limit_settings=limit_settings,
+ n_cross_validations=n_cross_validations,
+ search_space=search_space,
+ sweep_settings=sweep_settings,
+ test_data=test_data,
+ test_data_size=test_data_size,
+ validation_data=validation_data,
+ validation_data_size=validation_data_size,
+ weight_column_name=weight_column_name,
+ log_verbosity=log_verbosity,
+ target_column_name=target_column_name,
+ training_data=training_data,
+ **kwargs
+ )
+ self.log_verbosity = log_verbosity
+ self.target_column_name = target_column_name
+ self.task_type: str = "Classification"
+ self.training_data = training_data
+ self.positive_label = positive_label
+ self.primary_metric = primary_metric
+ self.training_settings = training_settings
+ self.cv_split_column_names = cv_split_column_names
+ self.featurization_settings = featurization_settings
+ self.fixed_parameters = fixed_parameters
+ self.limit_settings = limit_settings
+ self.n_cross_validations = n_cross_validations
+ self.search_space = search_space
+ self.sweep_settings = sweep_settings
+ self.test_data = test_data
+ self.test_data_size = test_data_size
+ self.validation_data = validation_data
+ self.validation_data_size = validation_data_size
+ self.weight_column_name = weight_column_name
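
Putting the Classification vertical together, a minimal sketch might look like the following; MLTableJobInput is assumed to accept a `uri` keyword pointing at an MLTable folder, and the datastore path is a placeholder:

from azure.mgmt.machinelearningservices import models

# Placeholder MLTable path; the `uri` keyword on MLTableJobInput is an assumption
# not shown in this excerpt of the diff.
classification_task = models.Classification(
    training_data=models.MLTableJobInput(
        uri="azureml://datastores/workspaceblobstore/paths/train-mltable/"
    ),
    target_column_name="label",
    positive_label="1",
    primary_metric="AUCWeighted",  # one of the documented primary metrics
    validation_data_size=0.2,      # hold out 20% when no validation dataset is given
)
# task_type is fixed to "Classification"; this object is typically attached to an
# AutoML job definition rather than submitted on its own.
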
- :ivar fields: Fields to apply transformer logic on.
- :vartype fields: list[str]
- :ivar parameters: Different properties to be passed to transformer.
- Input expected is dictionary of key,value pairs in JSON format.
- :vartype parameters: JSON
+
+class ModelPerformanceMetricThresholdBase(_serialization.Model):
+ """ModelPerformanceMetricThresholdBase.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ ClassificationModelPerformanceMetricThreshold, RegressionModelPerformanceMetricThreshold
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar model_type: [Required] Specifies the data type of the metric threshold. Required. Known
+ values are: "Classification" and "Regression".
+ :vartype model_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringModelType
+ :ivar threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
"""
+ _validation = {
+ "model_type": {"required": True},
+ }
+
_attribute_map = {
- "fields": {"key": "fields", "type": "[str]"},
- "parameters": {"key": "parameters", "type": "object"},
+ "model_type": {"key": "modelType", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
}
- def __init__(self, *, fields: Optional[List[str]] = None, parameters: Optional[JSON] = None, **kwargs: Any) -> None:
+ _subtype_map = {
+ "model_type": {
+ "Classification": "ClassificationModelPerformanceMetricThreshold",
+ "Regression": "RegressionModelPerformanceMetricThreshold",
+ }
+ }
+
+ def __init__(self, *, threshold: Optional["_models.MonitoringThreshold"] = None, **kwargs: Any) -> None:
"""
- :keyword fields: Fields to apply transformer logic on.
- :paramtype fields: list[str]
- :keyword parameters: Different properties to be passed to transformer.
- Input expected is dictionary of key,value pairs in JSON format.
- :paramtype parameters: JSON
+ :keyword threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
"""
super().__init__(**kwargs)
- self.fields = fields
- self.parameters = parameters
-
+ self.model_type: Optional[str] = None
+ self.threshold = threshold
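
The model-performance thresholds follow the same pattern as the drift and data quality families: the subclass fixes the `model_type` discriminator and adds a required metric. A sketch with keyword names inferred from the ClassificationModelPerformanceMetricThreshold docstring below (its constructor continues past this excerpt):

from azure.mgmt.machinelearningservices import models

# Keyword names inferred from the docstring (metric plus optional threshold);
# MonitoringThreshold's `value` keyword is an assumption.
perf_threshold = models.ClassificationModelPerformanceMetricThreshold(
    metric="Accuracy",
    threshold=models.MonitoringThreshold(value=0.9),
)
# perf_threshold.model_type is fixed to "Classification" by the subclass.
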
-class CommandJob(JobBaseProperties): # pylint: disable=too-many-instance-attributes
- """Command job definition.
- Variables are only populated by the server, and will be ignored when sending a request.
+class ClassificationModelPerformanceMetricThreshold(ModelPerformanceMetricThresholdBase):
+ """ClassificationModelPerformanceMetricThreshold.
All required parameters must be populated in order to send to Azure.
- :ivar description: The asset description text.
- :vartype description: str
- :ivar properties: The asset property dictionary.
- :vartype properties: dict[str, str]
- :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
- :vartype tags: dict[str, str]
- :ivar component_id: ARM resource ID of the component resource.
- :vartype component_id: str
- :ivar compute_id: ARM resource ID of the compute resource.
- :vartype compute_id: str
- :ivar display_name: Display name of job.
- :vartype display_name: str
- :ivar experiment_name: The name of the experiment the job belongs to. If not set, the job is
- placed in the "Default" experiment.
- :vartype experiment_name: str
- :ivar identity: Identity configuration. If set, this should be one of AmlToken,
- ManagedIdentity, UserIdentity or null.
- Defaults to AmlToken if null.
- :vartype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
- :ivar is_archived: Is the asset archived?.
- :vartype is_archived: bool
- :ivar job_type: [Required] Specifies the type of job. Required. Known values are: "AutoML",
- "Command", "Sweep", and "Pipeline".
- :vartype job_type: str or ~azure.mgmt.machinelearningservices.models.JobType
- :ivar services: List of JobEndpoints.
- For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
- :vartype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
- :ivar status: Status of the job. Known values are: "NotStarted", "Starting", "Provisioning",
- "Preparing", "Queued", "Running", "Finalizing", "CancelRequested", "Completed", "Failed",
- "Canceled", "NotResponding", "Paused", and "Unknown".
- :vartype status: str or ~azure.mgmt.machinelearningservices.models.JobStatus
- :ivar code_id: ARM resource ID of the code asset.
- :vartype code_id: str
- :ivar command: [Required] The command to execute on startup of the job. eg. "python train.py".
- Required.
- :vartype command: str
- :ivar distribution: Distribution configuration of the job. If set, this should be one of Mpi,
- Tensorflow, PyTorch, or null.
- :vartype distribution: ~azure.mgmt.machinelearningservices.models.DistributionConfiguration
- :ivar environment_id: [Required] The ARM resource ID of the Environment specification for the
- job. Required.
- :vartype environment_id: str
- :ivar environment_variables: Environment variables included in the job.
- :vartype environment_variables: dict[str, str]
- :ivar inputs: Mapping of input data bindings used in the job.
- :vartype inputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
- :ivar limits: Command Job limit.
- :vartype limits: ~azure.mgmt.machinelearningservices.models.CommandJobLimits
- :ivar outputs: Mapping of output data bindings used in the job.
- :vartype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
- :ivar parameters: Input parameters.
- :vartype parameters: JSON
- :ivar resources: Compute Resource configuration for the job.
- :vartype resources: ~azure.mgmt.machinelearningservices.models.JobResourceConfiguration
+ :ivar model_type: [Required] Specifies the data type of the metric threshold. Required. Known
+ values are: "Classification" and "Regression".
+ :vartype model_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringModelType
+ :ivar threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :ivar metric: [Required] The classification model performance to calculate. Required. Known
+ values are: "Accuracy", "Precision", and "Recall".
+ :vartype metric: str or
+ ~azure.mgmt.machinelearningservices.models.ClassificationModelPerformanceMetric
"""
_validation = {
- "job_type": {"required": True},
- "status": {"readonly": True},
- "command": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
- "environment_id": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
- "parameters": {"readonly": True},
+ "model_type": {"required": True},
+ "metric": {"required": True},
}
_attribute_map = {
- "description": {"key": "description", "type": "str"},
- "properties": {"key": "properties", "type": "{str}"},
- "tags": {"key": "tags", "type": "{str}"},
- "component_id": {"key": "componentId", "type": "str"},
- "compute_id": {"key": "computeId", "type": "str"},
- "display_name": {"key": "displayName", "type": "str"},
- "experiment_name": {"key": "experimentName", "type": "str"},
- "identity": {"key": "identity", "type": "IdentityConfiguration"},
- "is_archived": {"key": "isArchived", "type": "bool"},
- "job_type": {"key": "jobType", "type": "str"},
- "services": {"key": "services", "type": "{JobService}"},
- "status": {"key": "status", "type": "str"},
- "code_id": {"key": "codeId", "type": "str"},
- "command": {"key": "command", "type": "str"},
- "distribution": {"key": "distribution", "type": "DistributionConfiguration"},
- "environment_id": {"key": "environmentId", "type": "str"},
- "environment_variables": {"key": "environmentVariables", "type": "{str}"},
- "inputs": {"key": "inputs", "type": "{JobInput}"},
- "limits": {"key": "limits", "type": "CommandJobLimits"},
- "outputs": {"key": "outputs", "type": "{JobOutput}"},
- "parameters": {"key": "parameters", "type": "object"},
- "resources": {"key": "resources", "type": "JobResourceConfiguration"},
+ "model_type": {"key": "modelType", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
+ "metric": {"key": "metric", "type": "str"},
}
def __init__(
self,
*,
- command: str,
- environment_id: str,
- description: Optional[str] = None,
- properties: Optional[Dict[str, str]] = None,
- tags: Optional[Dict[str, str]] = None,
- component_id: Optional[str] = None,
- compute_id: Optional[str] = None,
- display_name: Optional[str] = None,
- experiment_name: str = "Default",
- identity: Optional["_models.IdentityConfiguration"] = None,
- is_archived: bool = False,
- services: Optional[Dict[str, "_models.JobService"]] = None,
- code_id: Optional[str] = None,
- distribution: Optional["_models.DistributionConfiguration"] = None,
- environment_variables: Optional[Dict[str, str]] = None,
- inputs: Optional[Dict[str, "_models.JobInput"]] = None,
- limits: Optional["_models.CommandJobLimits"] = None,
- outputs: Optional[Dict[str, "_models.JobOutput"]] = None,
- resources: Optional["_models.JobResourceConfiguration"] = None,
+ metric: Union[str, "_models.ClassificationModelPerformanceMetric"],
+ threshold: Optional["_models.MonitoringThreshold"] = None,
**kwargs: Any
) -> None:
"""
- :keyword description: The asset description text.
- :paramtype description: str
- :keyword properties: The asset property dictionary.
- :paramtype properties: dict[str, str]
- :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
- :paramtype tags: dict[str, str]
- :keyword component_id: ARM resource ID of the component resource.
- :paramtype component_id: str
- :keyword compute_id: ARM resource ID of the compute resource.
- :paramtype compute_id: str
- :keyword display_name: Display name of job.
- :paramtype display_name: str
- :keyword experiment_name: The name of the experiment the job belongs to. If not set, the job is
- placed in the "Default" experiment.
- :paramtype experiment_name: str
- :keyword identity: Identity configuration. If set, this should be one of AmlToken,
- ManagedIdentity, UserIdentity or null.
- Defaults to AmlToken if null.
- :paramtype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
- :keyword is_archived: Is the asset archived?.
- :paramtype is_archived: bool
- :keyword services: List of JobEndpoints.
- For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
- :paramtype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
- :keyword code_id: ARM resource ID of the code asset.
- :paramtype code_id: str
- :keyword command: [Required] The command to execute on startup of the job. eg. "python
- train.py". Required.
- :paramtype command: str
- :keyword distribution: Distribution configuration of the job. If set, this should be one of
- Mpi, Tensorflow, PyTorch, or null.
- :paramtype distribution: ~azure.mgmt.machinelearningservices.models.DistributionConfiguration
- :keyword environment_id: [Required] The ARM resource ID of the Environment specification for
- the job. Required.
- :paramtype environment_id: str
- :keyword environment_variables: Environment variables included in the job.
- :paramtype environment_variables: dict[str, str]
- :keyword inputs: Mapping of input data bindings used in the job.
- :paramtype inputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
- :keyword limits: Command Job limit.
- :paramtype limits: ~azure.mgmt.machinelearningservices.models.CommandJobLimits
- :keyword outputs: Mapping of output data bindings used in the job.
- :paramtype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
- :keyword resources: Compute Resource configuration for the job.
- :paramtype resources: ~azure.mgmt.machinelearningservices.models.JobResourceConfiguration
+ :keyword threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :keyword metric: [Required] The classification model performance to calculate. Required. Known
+ values are: "Accuracy", "Precision", and "Recall".
+ :paramtype metric: str or
+ ~azure.mgmt.machinelearningservices.models.ClassificationModelPerformanceMetric
"""
- super().__init__(
- description=description,
- properties=properties,
- tags=tags,
- component_id=component_id,
- compute_id=compute_id,
- display_name=display_name,
- experiment_name=experiment_name,
- identity=identity,
- is_archived=is_archived,
- services=services,
- **kwargs
- )
- self.job_type: str = "Command"
- self.code_id = code_id
- self.command = command
- self.distribution = distribution
- self.environment_id = environment_id
- self.environment_variables = environment_variables
- self.inputs = inputs
- self.limits = limits
- self.outputs = outputs
- self.parameters = None
- self.resources = resources
+ super().__init__(threshold=threshold, **kwargs)
+ self.model_type: str = "Classification"
+ self.metric = metric
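# ---- Editor's illustrative sketch (not part of the generated diff) ----
# Shows how the discriminated threshold model above might be constructed. The
# MonitoringThreshold "value" keyword is an assumption; that model is defined elsewhere
# in the models module and not in this hunk.
from azure.mgmt.machinelearningservices import models

accuracy_threshold = models.ClassificationModelPerformanceMetricThreshold(
    metric="Accuracy",                                # required; a known ClassificationModelPerformanceMetric value
    threshold=models.MonitoringThreshold(value=0.9),  # optional; the service applies a default if omitted
)
# The subclass constructor pins the discriminator, so no model_type argument is needed:
assert accuracy_threshold.model_type == "Classification"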
-class JobLimits(_serialization.Model):
- """JobLimits.
+class TrainingSettings(_serialization.Model):
+ """Training related configuration.
- You probably want to use the sub-classes and not this class directly. Known sub-classes are:
- CommandJobLimits, SweepJobLimits
+ :ivar enable_dnn_training: Enable recommendation of DNN models.
+ :vartype enable_dnn_training: bool
+ :ivar enable_model_explainability: Flag to turn on explainability on best model.
+ :vartype enable_model_explainability: bool
+ :ivar enable_onnx_compatible_models: Flag for enabling onnx compatible models.
+ :vartype enable_onnx_compatible_models: bool
+ :ivar enable_stack_ensemble: Enable stack ensemble run.
+ :vartype enable_stack_ensemble: bool
+ :ivar enable_vote_ensemble: Enable voting ensemble run.
+ :vartype enable_vote_ensemble: bool
+ :ivar ensemble_model_download_timeout: During VotingEnsemble and StackEnsemble model
+ generation, multiple fitted models from the previous child runs are downloaded.
+ Configure this parameter with a higher value than 300 secs, if more time is needed.
+ :vartype ensemble_model_download_timeout: ~datetime.timedelta
+ :ivar stack_ensemble_settings: Stack ensemble settings for stack ensemble run.
+ :vartype stack_ensemble_settings:
+ ~azure.mgmt.machinelearningservices.models.StackEnsembleSettings
+ :ivar training_mode: Training mode. Setting this to 'Auto' is currently the same as
+ 'NonDistributed', but in the future it may result in mixed-mode or heuristics-based
+ mode selection. Default is 'Auto'.
+ If 'Distributed' then only distributed featurization is used and distributed algorithms are
+ chosen.
+ If 'NonDistributed' then only non distributed algorithms are chosen. Known values are: "Auto",
+ "Distributed", and "NonDistributed".
+ :vartype training_mode: str or ~azure.mgmt.machinelearningservices.models.TrainingMode
+ """
- All required parameters must be populated in order to send to Azure.
+ _attribute_map = {
+ "enable_dnn_training": {"key": "enableDnnTraining", "type": "bool"},
+ "enable_model_explainability": {"key": "enableModelExplainability", "type": "bool"},
+ "enable_onnx_compatible_models": {"key": "enableOnnxCompatibleModels", "type": "bool"},
+ "enable_stack_ensemble": {"key": "enableStackEnsemble", "type": "bool"},
+ "enable_vote_ensemble": {"key": "enableVoteEnsemble", "type": "bool"},
+ "ensemble_model_download_timeout": {"key": "ensembleModelDownloadTimeout", "type": "duration"},
+ "stack_ensemble_settings": {"key": "stackEnsembleSettings", "type": "StackEnsembleSettings"},
+ "training_mode": {"key": "trainingMode", "type": "str"},
+ }
- :ivar job_limits_type: [Required] JobLimit type. Required. Known values are: "Command" and
- "Sweep".
- :vartype job_limits_type: str or ~azure.mgmt.machinelearningservices.models.JobLimitsType
- :ivar timeout: The max run duration in ISO 8601 format, after which the job will be cancelled.
- Only supports duration with precision as low as Seconds.
- :vartype timeout: ~datetime.timedelta
+ def __init__(
+ self,
+ *,
+ enable_dnn_training: bool = False,
+ enable_model_explainability: bool = True,
+ enable_onnx_compatible_models: bool = False,
+ enable_stack_ensemble: bool = True,
+ enable_vote_ensemble: bool = True,
+ ensemble_model_download_timeout: datetime.timedelta = "PT5M",
+ stack_ensemble_settings: Optional["_models.StackEnsembleSettings"] = None,
+ training_mode: Optional[Union[str, "_models.TrainingMode"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword enable_dnn_training: Enable recommendation of DNN models.
+ :paramtype enable_dnn_training: bool
+ :keyword enable_model_explainability: Flag to turn on explainability on best model.
+ :paramtype enable_model_explainability: bool
+ :keyword enable_onnx_compatible_models: Flag for enabling onnx compatible models.
+ :paramtype enable_onnx_compatible_models: bool
+ :keyword enable_stack_ensemble: Enable stack ensemble run.
+ :paramtype enable_stack_ensemble: bool
+ :keyword enable_vote_ensemble: Enable voting ensemble run.
+ :paramtype enable_vote_ensemble: bool
+ :keyword ensemble_model_download_timeout: During VotingEnsemble and StackEnsemble model
+ generation, multiple fitted models from the previous child runs are downloaded.
+ Configure this parameter with a higher value than 300 secs, if more time is needed.
+ :paramtype ensemble_model_download_timeout: ~datetime.timedelta
+ :keyword stack_ensemble_settings: Stack ensemble settings for stack ensemble run.
+ :paramtype stack_ensemble_settings:
+ ~azure.mgmt.machinelearningservices.models.StackEnsembleSettings
+ :keyword training_mode: Training mode. Setting this to 'Auto' is currently the same as
+ 'NonDistributed', but in the future it may result in mixed-mode or heuristics-based
+ mode selection. Default is 'Auto'.
+ If 'Distributed' then only distributed featurization is used and distributed algorithms are
+ chosen.
+ If 'NonDistributed' then only non distributed algorithms are chosen. Known values are: "Auto",
+ "Distributed", and "NonDistributed".
+ :paramtype training_mode: str or ~azure.mgmt.machinelearningservices.models.TrainingMode
+ """
+ super().__init__(**kwargs)
+ self.enable_dnn_training = enable_dnn_training
+ self.enable_model_explainability = enable_model_explainability
+ self.enable_onnx_compatible_models = enable_onnx_compatible_models
+ self.enable_stack_ensemble = enable_stack_ensemble
+ self.enable_vote_ensemble = enable_vote_ensemble
+ self.ensemble_model_download_timeout = ensemble_model_download_timeout
+ self.stack_ensemble_settings = stack_ensemble_settings
+ self.training_mode = training_mode
+
+
+class ClassificationTrainingSettings(TrainingSettings):
+ """Classification Training related configuration.
+
+ :ivar enable_dnn_training: Enable recommendation of DNN models.
+ :vartype enable_dnn_training: bool
+ :ivar enable_model_explainability: Flag to turn on explainability on best model.
+ :vartype enable_model_explainability: bool
+ :ivar enable_onnx_compatible_models: Flag for enabling onnx compatible models.
+ :vartype enable_onnx_compatible_models: bool
+ :ivar enable_stack_ensemble: Enable stack ensemble run.
+ :vartype enable_stack_ensemble: bool
+ :ivar enable_vote_ensemble: Enable voting ensemble run.
+ :vartype enable_vote_ensemble: bool
+ :ivar ensemble_model_download_timeout: During VotingEnsemble and StackEnsemble model
+ generation, multiple fitted models from the previous child runs are downloaded.
+ Configure this parameter with a higher value than 300 secs, if more time is needed.
+ :vartype ensemble_model_download_timeout: ~datetime.timedelta
+ :ivar stack_ensemble_settings: Stack ensemble settings for stack ensemble run.
+ :vartype stack_ensemble_settings:
+ ~azure.mgmt.machinelearningservices.models.StackEnsembleSettings
+ :ivar training_mode: Training mode. Setting this to 'Auto' is currently the same as
+ 'NonDistributed', but in the future it may result in mixed-mode or heuristics-based
+ mode selection. Default is 'Auto'.
+ If 'Distributed' then only distributed featurization is used and distributed algorithms are
+ chosen.
+ If 'NonDistributed' then only non distributed algorithms are chosen. Known values are: "Auto",
+ "Distributed", and "NonDistributed".
+ :vartype training_mode: str or ~azure.mgmt.machinelearningservices.models.TrainingMode
+ :ivar allowed_training_algorithms: Allowed models for classification task.
+ :vartype allowed_training_algorithms: list[str or
+ ~azure.mgmt.machinelearningservices.models.ClassificationModels]
+ :ivar blocked_training_algorithms: Blocked models for classification task.
+ :vartype blocked_training_algorithms: list[str or
+ ~azure.mgmt.machinelearningservices.models.ClassificationModels]
"""
- _validation = {
- "job_limits_type": {"required": True},
+ _attribute_map = {
+ "enable_dnn_training": {"key": "enableDnnTraining", "type": "bool"},
+ "enable_model_explainability": {"key": "enableModelExplainability", "type": "bool"},
+ "enable_onnx_compatible_models": {"key": "enableOnnxCompatibleModels", "type": "bool"},
+ "enable_stack_ensemble": {"key": "enableStackEnsemble", "type": "bool"},
+ "enable_vote_ensemble": {"key": "enableVoteEnsemble", "type": "bool"},
+ "ensemble_model_download_timeout": {"key": "ensembleModelDownloadTimeout", "type": "duration"},
+ "stack_ensemble_settings": {"key": "stackEnsembleSettings", "type": "StackEnsembleSettings"},
+ "training_mode": {"key": "trainingMode", "type": "str"},
+ "allowed_training_algorithms": {"key": "allowedTrainingAlgorithms", "type": "[str]"},
+ "blocked_training_algorithms": {"key": "blockedTrainingAlgorithms", "type": "[str]"},
}
+ def __init__(
+ self,
+ *,
+ enable_dnn_training: bool = False,
+ enable_model_explainability: bool = True,
+ enable_onnx_compatible_models: bool = False,
+ enable_stack_ensemble: bool = True,
+ enable_vote_ensemble: bool = True,
+ ensemble_model_download_timeout: datetime.timedelta = "PT5M",
+ stack_ensemble_settings: Optional["_models.StackEnsembleSettings"] = None,
+ training_mode: Optional[Union[str, "_models.TrainingMode"]] = None,
+ allowed_training_algorithms: Optional[List[Union[str, "_models.ClassificationModels"]]] = None,
+ blocked_training_algorithms: Optional[List[Union[str, "_models.ClassificationModels"]]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword enable_dnn_training: Enable recommendation of DNN models.
+ :paramtype enable_dnn_training: bool
+ :keyword enable_model_explainability: Flag to turn on explainability on best model.
+ :paramtype enable_model_explainability: bool
+ :keyword enable_onnx_compatible_models: Flag for enabling onnx compatible models.
+ :paramtype enable_onnx_compatible_models: bool
+ :keyword enable_stack_ensemble: Enable stack ensemble run.
+ :paramtype enable_stack_ensemble: bool
+ :keyword enable_vote_ensemble: Enable voting ensemble run.
+ :paramtype enable_vote_ensemble: bool
+ :keyword ensemble_model_download_timeout: During VotingEnsemble and StackEnsemble model
+ generation, multiple fitted models from the previous child runs are downloaded.
+ Configure this parameter with a higher value than 300 secs, if more time is needed.
+ :paramtype ensemble_model_download_timeout: ~datetime.timedelta
+ :keyword stack_ensemble_settings: Stack ensemble settings for stack ensemble run.
+ :paramtype stack_ensemble_settings:
+ ~azure.mgmt.machinelearningservices.models.StackEnsembleSettings
+ :keyword training_mode: Training mode. Setting this to 'Auto' is currently the same as
+ 'NonDistributed', but in the future it may result in mixed-mode or heuristics-based
+ mode selection. Default is 'Auto'.
+ If 'Distributed' then only distributed featurization is used and distributed algorithms are
+ chosen.
+ If 'NonDistributed' then only non distributed algorithms are chosen. Known values are: "Auto",
+ "Distributed", and "NonDistributed".
+ :paramtype training_mode: str or ~azure.mgmt.machinelearningservices.models.TrainingMode
+ :keyword allowed_training_algorithms: Allowed models for classification task.
+ :paramtype allowed_training_algorithms: list[str or
+ ~azure.mgmt.machinelearningservices.models.ClassificationModels]
+ :keyword blocked_training_algorithms: Blocked models for classification task.
+ :paramtype blocked_training_algorithms: list[str or
+ ~azure.mgmt.machinelearningservices.models.ClassificationModels]
+ """
+ super().__init__(
+ enable_dnn_training=enable_dnn_training,
+ enable_model_explainability=enable_model_explainability,
+ enable_onnx_compatible_models=enable_onnx_compatible_models,
+ enable_stack_ensemble=enable_stack_ensemble,
+ enable_vote_ensemble=enable_vote_ensemble,
+ ensemble_model_download_timeout=ensemble_model_download_timeout,
+ stack_ensemble_settings=stack_ensemble_settings,
+ training_mode=training_mode,
+ **kwargs
+ )
+ self.allowed_training_algorithms = allowed_training_algorithms
+ self.blocked_training_algorithms = blocked_training_algorithms
+
+
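# ---- Editor's illustrative sketch (not part of the generated diff) ----
# Building ClassificationTrainingSettings with the keywords shown above. The algorithm
# names are assumed members of the ClassificationModels enum; plain strings are accepted
# because the parameters are typed "str or ClassificationModels".
import datetime
from azure.mgmt.machinelearningservices import models

training = models.ClassificationTrainingSettings(
    enable_onnx_compatible_models=True,
    allowed_training_algorithms=["LightGBM", "XGBoostClassifier"],   # assumed enum values
    blocked_training_algorithms=["KNN"],                             # assumed enum value
    ensemble_model_download_timeout=datetime.timedelta(minutes=10),  # default serializes as "PT5M" (~5 min)
    training_mode="Auto",
)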
+class ClusterUpdateParameters(_serialization.Model):
+ """AmlCompute update parameters.
+
+ :ivar properties: Properties of ClusterUpdate.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.ScaleSettingsInformation
+ """
+
_attribute_map = {
- "job_limits_type": {"key": "jobLimitsType", "type": "str"},
- "timeout": {"key": "timeout", "type": "duration"},
+ "properties": {"key": "properties.properties", "type": "ScaleSettingsInformation"},
}
- _subtype_map = {"job_limits_type": {"Command": "CommandJobLimits", "Sweep": "SweepJobLimits"}}
-
- def __init__(self, *, timeout: Optional[datetime.timedelta] = None, **kwargs: Any) -> None:
+ def __init__(self, *, properties: Optional["_models.ScaleSettingsInformation"] = None, **kwargs: Any) -> None:
"""
- :keyword timeout: The max run duration in ISO 8601 format, after which the job will be
- cancelled. Only supports duration with precision as low as Seconds.
- :paramtype timeout: ~datetime.timedelta
+ :keyword properties: Properties of ClusterUpdate.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.ScaleSettingsInformation
"""
super().__init__(**kwargs)
- self.job_limits_type: Optional[str] = None
- self.timeout = timeout
+ self.properties = properties
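# ---- Editor's illustrative sketch (not part of the generated diff) ----
# ClusterUpdateParameters flattens its payload under "properties.properties" (see the
# _attribute_map above). ScaleSettingsInformation/ScaleSettings and their keywords are
# assumptions based on the wider models module, not shown in this hunk.
from azure.mgmt.machinelearningservices import models

update = models.ClusterUpdateParameters(
    properties=models.ScaleSettingsInformation(
        scale_settings=models.ScaleSettings(max_node_count=4, min_node_count=0)  # assumed keywords
    )
)
# serialize() would nest this as {"properties": {"properties": {...}}} on the wire.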
-class CommandJobLimits(JobLimits):
- """Command Job limit class.
+class ExportSummary(_serialization.Model):
+ """ExportSummary.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ CsvExportSummary, CocoExportSummary, DatasetExportSummary
+
+ Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
- :ivar job_limits_type: [Required] JobLimit type. Required. Known values are: "Command" and
- "Sweep".
- :vartype job_limits_type: str or ~azure.mgmt.machinelearningservices.models.JobLimitsType
- :ivar timeout: The max run duration in ISO 8601 format, after which the job will be cancelled.
- Only supports duration with precision as low as Seconds.
- :vartype timeout: ~datetime.timedelta
+ :ivar end_date_time: The time when the export was completed.
+ :vartype end_date_time: ~datetime.datetime
+ :ivar exported_row_count: The total number of labeled datapoints exported.
+ :vartype exported_row_count: int
+ :ivar format: [Required] The format of exported labels, also as the discriminator. Required.
+ Known values are: "Dataset", "Coco", and "CSV".
+ :vartype format: str or ~azure.mgmt.machinelearningservices.models.ExportFormatType
+ :ivar labeling_job_id: Name and identifier of the job containing exported labels.
+ :vartype labeling_job_id: str
+ :ivar start_date_time: The time when the export was requested.
+ :vartype start_date_time: ~datetime.datetime
"""
_validation = {
- "job_limits_type": {"required": True},
+ "end_date_time": {"readonly": True},
+ "exported_row_count": {"readonly": True},
+ "format": {"required": True},
+ "labeling_job_id": {"readonly": True},
+ "start_date_time": {"readonly": True},
}
_attribute_map = {
- "job_limits_type": {"key": "jobLimitsType", "type": "str"},
- "timeout": {"key": "timeout", "type": "duration"},
+ "end_date_time": {"key": "endDateTime", "type": "iso-8601"},
+ "exported_row_count": {"key": "exportedRowCount", "type": "int"},
+ "format": {"key": "format", "type": "str"},
+ "labeling_job_id": {"key": "labelingJobId", "type": "str"},
+ "start_date_time": {"key": "startDateTime", "type": "iso-8601"},
}
- def __init__(self, *, timeout: Optional[datetime.timedelta] = None, **kwargs: Any) -> None:
- """
- :keyword timeout: The max run duration in ISO 8601 format, after which the job will be
- cancelled. Only supports duration with precision as low as Seconds.
- :paramtype timeout: ~datetime.timedelta
- """
- super().__init__(timeout=timeout, **kwargs)
- self.job_limits_type: str = "Command"
+ _subtype_map = {
+ "format": {"CSV": "CsvExportSummary", "Coco": "CocoExportSummary", "Dataset": "DatasetExportSummary"}
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.end_date_time = None
+ self.exported_row_count = None
+ self.format: Optional[str] = None
+ self.labeling_job_id = None
+ self.start_date_time = None
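# ---- Editor's illustrative sketch (not part of the generated diff) ----
# The "format" field is the discriminator: _subtype_map routes deserialization to
# CsvExportSummary, CocoExportSummary, or DatasetExportSummary. Model.deserialize is
# assumed available from the shared _serialization base class.
from azure.mgmt.machinelearningservices import models

payload = {"format": "Coco", "exportedRowCount": 120, "labelingJobId": "labeling-job-1"}
summary = models.ExportSummary.deserialize(payload)
assert isinstance(summary, models.CocoExportSummary)   # chosen via the discriminator
# Read-only fields (endDateTime, exportedRowCount, ...) are only populated from responses.
assert summary.exported_row_count == 120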
-class ComponentContainer(Resource):
- """Azure Resource Manager resource envelope.
+class CocoExportSummary(ExportSummary):
+ """CocoExportSummary.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
- :ivar id: Fully qualified resource ID for the resource. Ex -
+ :ivar end_date_time: The time when the export was completed.
+ :vartype end_date_time: ~datetime.datetime
+ :ivar exported_row_count: The total number of labeled datapoints exported.
+ :vartype exported_row_count: int
+ :ivar format: [Required] The format of exported labels, also as the discriminator. Required.
+ Known values are: "Dataset", "Coco", and "CSV".
+ :vartype format: str or ~azure.mgmt.machinelearningservices.models.ExportFormatType
+ :ivar labeling_job_id: Name and identifier of the job containing exported labels.
+ :vartype labeling_job_id: str
+ :ivar start_date_time: The time when the export was requested.
+ :vartype start_date_time: ~datetime.datetime
+ :ivar container_name: The container name to which the labels will be exported.
+ :vartype container_name: str
+ :ivar snapshot_path: The output path where the labels will be exported.
+ :vartype snapshot_path: str
+ """
+
+ _validation = {
+ "end_date_time": {"readonly": True},
+ "exported_row_count": {"readonly": True},
+ "format": {"required": True},
+ "labeling_job_id": {"readonly": True},
+ "start_date_time": {"readonly": True},
+ "container_name": {"readonly": True},
+ "snapshot_path": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "end_date_time": {"key": "endDateTime", "type": "iso-8601"},
+ "exported_row_count": {"key": "exportedRowCount", "type": "int"},
+ "format": {"key": "format", "type": "str"},
+ "labeling_job_id": {"key": "labelingJobId", "type": "str"},
+ "start_date_time": {"key": "startDateTime", "type": "iso-8601"},
+ "container_name": {"key": "containerName", "type": "str"},
+ "snapshot_path": {"key": "snapshotPath", "type": "str"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.format: str = "Coco"
+ self.container_name = None
+ self.snapshot_path = None
+
+
+class CodeConfiguration(_serialization.Model):
+ """Configuration for a scoring code asset.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar code_id: ARM resource ID of the code asset.
+ :vartype code_id: str
+ :ivar scoring_script: [Required] The script to execute on startup. eg. "score.py". Required.
+ :vartype scoring_script: str
+ """
+
+ _validation = {
+ "scoring_script": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
+ _attribute_map = {
+ "code_id": {"key": "codeId", "type": "str"},
+ "scoring_script": {"key": "scoringScript", "type": "str"},
+ }
+
+ def __init__(self, *, scoring_script: str, code_id: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword code_id: ARM resource ID of the code asset.
+ :paramtype code_id: str
+ :keyword scoring_script: [Required] The script to execute on startup. eg. "score.py". Required.
+ :paramtype scoring_script: str
+ """
+ super().__init__(**kwargs)
+ self.code_id = code_id
+ self.scoring_script = scoring_script
+
+
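# ---- Editor's illustrative sketch (not part of the generated diff) ----
# scoring_script is the only required field; the _validation map above records a
# min_length of 1 and a word-character pattern for it. The code asset ID below is a
# hypothetical placeholder, not a real resource.
from azure.mgmt.machinelearningservices import models

code_config = models.CodeConfiguration(
    scoring_script="score.py",
    code_id="<code-asset-arm-resource-id>",  # hypothetical placeholder
)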
+class ProxyResource(Resource):
+ """The resource model definition for a Azure Resource Manager proxy resource. It will not have
+ tags and a location.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ """
+
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+
+
+class CodeContainer(ProxyResource):
+ """Azure Resource Manager resource envelope.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
@@ -5081,7 +6241,7 @@ class ComponentContainer(Resource):
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
:ivar properties: [Required] Additional attributes of the entity. Required.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.ComponentContainerProperties
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.CodeContainerProperties
"""
_validation = {
@@ -5097,27 +6257,20 @@ class ComponentContainer(Resource):
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
- "properties": {"key": "properties", "type": "ComponentContainerProperties"},
+ "properties": {"key": "properties", "type": "CodeContainerProperties"},
}
- def __init__(self, *, properties: "_models.ComponentContainerProperties", **kwargs: Any) -> None:
+ def __init__(self, *, properties: "_models.CodeContainerProperties", **kwargs: Any) -> None:
"""
:keyword properties: [Required] Additional attributes of the entity. Required.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.ComponentContainerProperties
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.CodeContainerProperties
"""
super().__init__(**kwargs)
self.properties = properties
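# ---- Editor's illustrative sketch (not part of the generated diff) ----
# A CodeContainer envelope wraps CodeContainerProperties. The description/tags keywords
# come from the AssetContainer base class, which is defined outside this hunk, so treat
# them as assumptions.
from azure.mgmt.machinelearningservices import models

container = models.CodeContainer(
    properties=models.CodeContainerProperties(
        description="Scoring scripts for the fraud model",
        tags={"team": "ml-platform"},
    )
)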
-class ComponentContainerProperties(AssetContainer):
- """Component container definition.
-
-
- .. raw:: html
-
- .
+class CodeContainerProperties(AssetContainer):
+ """Container for code asset versions.
Variables are only populated by the server, and will be ignored when sending a request.
@@ -5133,7 +6286,7 @@ class ComponentContainerProperties(AssetContainer):
:vartype latest_version: str
:ivar next_version: The next auto incremental version.
:vartype next_version: str
- :ivar provisioning_state: Provisioning state for the component container. Known values are:
+ :ivar provisioning_state: Provisioning state for the code container. Known values are:
"Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.AssetProvisioningState
@@ -5178,41 +6331,37 @@ def __init__(
self.provisioning_state = None
-class ComponentContainerResourceArmPaginatedResult(_serialization.Model):
- """A paginated list of ComponentContainer entities.
+class CodeContainerResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of CodeContainer entities.
- :ivar next_link: The link to the next page of ComponentContainer objects. If null, there are no
+ :ivar next_link: The link to the next page of CodeContainer objects. If null, there are no
additional pages.
:vartype next_link: str
- :ivar value: An array of objects of type ComponentContainer.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.ComponentContainer]
+ :ivar value: An array of objects of type CodeContainer.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.CodeContainer]
"""
_attribute_map = {
"next_link": {"key": "nextLink", "type": "str"},
- "value": {"key": "value", "type": "[ComponentContainer]"},
+ "value": {"key": "value", "type": "[CodeContainer]"},
}
def __init__(
- self,
- *,
- next_link: Optional[str] = None,
- value: Optional[List["_models.ComponentContainer"]] = None,
- **kwargs: Any
+ self, *, next_link: Optional[str] = None, value: Optional[List["_models.CodeContainer"]] = None, **kwargs: Any
) -> None:
"""
- :keyword next_link: The link to the next page of ComponentContainer objects. If null, there are
- no additional pages.
+ :keyword next_link: The link to the next page of CodeContainer objects. If null, there are no
+ additional pages.
:paramtype next_link: str
- :keyword value: An array of objects of type ComponentContainer.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.ComponentContainer]
+ :keyword value: An array of objects of type CodeContainer.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.CodeContainer]
"""
super().__init__(**kwargs)
self.next_link = next_link
self.value = value
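# ---- Editor's illustrative sketch (not part of the generated diff) ----
# CodeContainerResourceArmPaginatedResult is the wire shape for list responses: "value"
# holds one page and "nextLink" points at the next. Callers normally never build it by
# hand; the generated list operation is assumed to return a pager that follows nextLink.
from azure.mgmt.machinelearningservices import models

page = models.CodeContainerResourceArmPaginatedResult(
    value=[],          # one page of CodeContainer objects
    next_link=None,    # a null nextLink means there are no further pages
)
assert page.next_link is None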
-class ComponentVersion(Resource):
+class CodeVersion(ProxyResource):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
@@ -5231,7 +6380,7 @@ class ComponentVersion(Resource):
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
:ivar properties: [Required] Additional attributes of the entity. Required.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.ComponentVersionProperties
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.CodeVersionProperties
"""
_validation = {
@@ -5247,20 +6396,20 @@ class ComponentVersion(Resource):
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
- "properties": {"key": "properties", "type": "ComponentVersionProperties"},
+ "properties": {"key": "properties", "type": "CodeVersionProperties"},
}
- def __init__(self, *, properties: "_models.ComponentVersionProperties", **kwargs: Any) -> None:
+ def __init__(self, *, properties: "_models.CodeVersionProperties", **kwargs: Any) -> None:
"""
:keyword properties: [Required] Additional attributes of the entity. Required.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.ComponentVersionProperties
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.CodeVersionProperties
"""
super().__init__(**kwargs)
self.properties = properties
-class ComponentVersionProperties(AssetBase):
- """Definition of a component version: defines resources that span component types.
+class CodeVersionProperties(AssetBase):
+ """Code asset version details.
Variables are only populated by the server, and will be ignored when sending a request.
@@ -5270,20 +6419,17 @@ class ComponentVersionProperties(AssetBase):
:vartype properties: dict[str, str]
:ivar tags: Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
- :ivar is_anonymous: If the name version are system generated (anonymous registration).
+ :ivar auto_delete_setting: Specifies the lifecycle setting of managed data asset.
+ :vartype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :ivar is_anonymous: If the name and version are system generated (anonymous registration). For
+ types where Stage is defined, when Stage is provided it will be used to populate IsAnonymous.
:vartype is_anonymous: bool
- :ivar is_archived: Is the asset archived?.
+ :ivar is_archived: Is the asset archived? For types where Stage is defined, when Stage is
+ provided it will be used to populate IsArchived.
:vartype is_archived: bool
- :ivar component_spec: Defines Component definition details.
-
-
- .. raw:: html
-
- .
- :vartype component_spec: JSON
- :ivar provisioning_state: Provisioning state for the component version. Known values are:
+ :ivar code_uri: Uri where code is located.
+ :vartype code_uri: str
+ :ivar provisioning_state: Provisioning state for the code version. Known values are:
"Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.AssetProvisioningState
@@ -5297,9 +6443,10 @@ class ComponentVersionProperties(AssetBase):
"description": {"key": "description", "type": "str"},
"properties": {"key": "properties", "type": "{str}"},
"tags": {"key": "tags", "type": "{str}"},
+ "auto_delete_setting": {"key": "autoDeleteSetting", "type": "AutoDeleteSetting"},
"is_anonymous": {"key": "isAnonymous", "type": "bool"},
"is_archived": {"key": "isArchived", "type": "bool"},
- "component_spec": {"key": "componentSpec", "type": "object"},
+ "code_uri": {"key": "codeUri", "type": "str"},
"provisioning_state": {"key": "provisioningState", "type": "str"},
}
@@ -5309,9 +6456,10 @@ def __init__(
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
+ auto_delete_setting: Optional["_models.AutoDeleteSetting"] = None,
is_anonymous: bool = False,
is_archived: bool = False,
- component_spec: Optional[JSON] = None,
+ code_uri: Optional[str] = None,
**kwargs: Any
) -> None:
"""
@@ -5321,3376 +6469,3466 @@ def __init__(
:paramtype properties: dict[str, str]
:keyword tags: Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
- :keyword is_anonymous: If the name version are system generated (anonymous registration).
+ :keyword auto_delete_setting: Specifies the lifecycle setting of managed data asset.
+ :paramtype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :keyword is_anonymous: If the name and version are system generated (anonymous registration). For
+ types where Stage is defined, when Stage is provided it will be used to populate IsAnonymous.
:paramtype is_anonymous: bool
- :keyword is_archived: Is the asset archived?.
+ :keyword is_archived: Is the asset archived? For types where Stage is defined, when Stage is
+ provided it will be used to populate IsArchived.
:paramtype is_archived: bool
- :keyword component_spec: Defines Component definition details.
-
-
- .. raw:: html
-
- .
- :paramtype component_spec: JSON
+ :keyword code_uri: Uri where code is located.
+ :paramtype code_uri: str
"""
super().__init__(
description=description,
properties=properties,
tags=tags,
+ auto_delete_setting=auto_delete_setting,
is_anonymous=is_anonymous,
is_archived=is_archived,
**kwargs
)
- self.component_spec = component_spec
+ self.code_uri = code_uri
self.provisioning_state = None
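# ---- Editor's illustrative sketch (not part of the generated diff) ----
# CodeVersionProperties now carries code_uri and an optional auto_delete_setting instead
# of the removed component_spec. The blob URI below is a hypothetical placeholder.
from azure.mgmt.machinelearningservices import models

version = models.CodeVersion(
    properties=models.CodeVersionProperties(
        code_uri="https://<account>.blob.core.windows.net/<container>/src",  # hypothetical
        description="Training entry scripts",
        is_anonymous=False,
    )
)
# provisioning_state is read-only and stays None until returned by the service.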
-class ComponentVersionResourceArmPaginatedResult(_serialization.Model):
- """A paginated list of ComponentVersion entities.
+class CodeVersionResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of CodeVersion entities.
- :ivar next_link: The link to the next page of ComponentVersion objects. If null, there are no
+ :ivar next_link: The link to the next page of CodeVersion objects. If null, there are no
additional pages.
:vartype next_link: str
- :ivar value: An array of objects of type ComponentVersion.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.ComponentVersion]
+ :ivar value: An array of objects of type CodeVersion.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.CodeVersion]
"""
_attribute_map = {
"next_link": {"key": "nextLink", "type": "str"},
- "value": {"key": "value", "type": "[ComponentVersion]"},
+ "value": {"key": "value", "type": "[CodeVersion]"},
}
def __init__(
- self,
- *,
- next_link: Optional[str] = None,
- value: Optional[List["_models.ComponentVersion"]] = None,
- **kwargs: Any
+ self, *, next_link: Optional[str] = None, value: Optional[List["_models.CodeVersion"]] = None, **kwargs: Any
) -> None:
"""
- :keyword next_link: The link to the next page of ComponentVersion objects. If null, there are
- no additional pages.
+ :keyword next_link: The link to the next page of CodeVersion objects. If null, there are no
+ additional pages.
:paramtype next_link: str
- :keyword value: An array of objects of type ComponentVersion.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.ComponentVersion]
+ :keyword value: An array of objects of type CodeVersion.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.CodeVersion]
"""
super().__init__(**kwargs)
self.next_link = next_link
self.value = value
-class ComputeInstanceSchema(_serialization.Model):
- """Properties(top level) of ComputeInstance.
+class Collection(_serialization.Model):
+ """Collection.
- :ivar properties: Properties of ComputeInstance.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.ComputeInstanceProperties
+ :ivar client_id: The MSI client id used to collect logging to blob storage. If it's
+ null, the backend will pick a registered endpoint identity to auth.
+ :vartype client_id: str
+ :ivar data_collection_mode: Enable or disable data collection. Known values are: "Enabled" and
+ "Disabled".
+ :vartype data_collection_mode: str or
+ ~azure.mgmt.machinelearningservices.models.DataCollectionMode
+ :ivar data_id: The data asset arm resource id. Client side will ensure data asset is pointing
+ to the blob storage, and backend will collect data to the blob storage.
+ :vartype data_id: str
+ :ivar sampling_rate: The sampling rate for collection. Sampling rate 1.0 means we collect 100%
+ of data by default.
+ :vartype sampling_rate: float
"""
_attribute_map = {
- "properties": {"key": "properties", "type": "ComputeInstanceProperties"},
+ "client_id": {"key": "clientId", "type": "str"},
+ "data_collection_mode": {"key": "dataCollectionMode", "type": "str"},
+ "data_id": {"key": "dataId", "type": "str"},
+ "sampling_rate": {"key": "samplingRate", "type": "float"},
}
- def __init__(self, *, properties: Optional["_models.ComputeInstanceProperties"] = None, **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ client_id: Optional[str] = None,
+ data_collection_mode: Optional[Union[str, "_models.DataCollectionMode"]] = None,
+ data_id: Optional[str] = None,
+ sampling_rate: float = 1,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword properties: Properties of ComputeInstance.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.ComputeInstanceProperties
+ :keyword client_id: The MSI client id used to collect logging to blob storage. If it's
+ null, the backend will pick a registered endpoint identity to auth.
+ :paramtype client_id: str
+ :keyword data_collection_mode: Enable or disable data collection. Known values are: "Enabled"
+ and "Disabled".
+ :paramtype data_collection_mode: str or
+ ~azure.mgmt.machinelearningservices.models.DataCollectionMode
+ :keyword data_id: The data asset arm resource id. Client side will ensure data asset is
+ pointing to the blob storage, and backend will collect data to the blob storage.
+ :paramtype data_id: str
+ :keyword sampling_rate: The sampling rate for collection. Sampling rate 1.0 means we collect
+ 100% of data by default.
+ :paramtype sampling_rate: float
"""
super().__init__(**kwargs)
- self.properties = properties
+ self.client_id = client_id
+ self.data_collection_mode = data_collection_mode
+ self.data_id = data_id
+ self.sampling_rate = sampling_rate
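# ---- Editor's illustrative sketch (not part of the generated diff) ----
# Per-output data collection settings. The data asset ID is a hypothetical placeholder,
# and sampling_rate defaults to 1.0 (collect everything) when omitted.
from azure.mgmt.machinelearningservices import models

collection = models.Collection(
    data_collection_mode="Enabled",
    data_id="<data-asset-arm-resource-id>",  # hypothetical placeholder
    sampling_rate=0.5,                       # collect roughly half of the traffic
)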
-class ComputeInstance(Compute, ComputeInstanceSchema): # pylint: disable=too-many-instance-attributes
- """An Azure Machine Learning compute instance.
+class ColumnTransformer(_serialization.Model):
+ """Column transformer parameters.
- Variables are only populated by the server, and will be ignored when sending a request.
+ :ivar fields: Fields to apply transformer logic on.
+ :vartype fields: list[str]
+ :ivar parameters: Different properties to be passed to transformer.
+ Input expected is dictionary of key,value pairs in JSON format.
+ :vartype parameters: JSON
+ """
- All required parameters must be populated in order to send to Azure.
+ _attribute_map = {
+ "fields": {"key": "fields", "type": "[str]"},
+ "parameters": {"key": "parameters", "type": "object"},
+ }
- :ivar properties: Properties of ComputeInstance.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.ComputeInstanceProperties
- :ivar compute_type: The type of compute. Required. Known values are: "AKS", "Kubernetes",
- "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight", "Databricks",
- "DataLakeAnalytics", and "SynapseSpark".
- :vartype compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
- :ivar compute_location: Location for the underlying compute.
- :vartype compute_location: str
- :ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
- Updating, Provisioning, Succeeded, and Failed. Known values are: "Unknown", "Updating",
- "Creating", "Deleting", "Succeeded", "Failed", and "Canceled".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.ProvisioningState
- :ivar description: The description of the Machine Learning compute.
+ def __init__(self, *, fields: Optional[List[str]] = None, parameters: Optional[JSON] = None, **kwargs: Any) -> None:
+ """
+ :keyword fields: Fields to apply transformer logic on.
+ :paramtype fields: list[str]
+ :keyword parameters: Different properties to be passed to transformer.
+ Input expected is dictionary of key,value pairs in JSON format.
+ :paramtype parameters: JSON
+ """
+ super().__init__(**kwargs)
+ self.fields = fields
+ self.parameters = parameters
+
+
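# ---- Editor's illustrative sketch (not part of the generated diff) ----
# ColumnTransformer takes the target column names plus a free-form JSON dict of
# transformer parameters; the keys shown here are made-up examples, not a documented schema.
from azure.mgmt.machinelearningservices import models

transformer = models.ColumnTransformer(
    fields=["age", "income"],
    parameters={"strategy": "median"},   # arbitrary key/value pairs passed through as JSON
)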
+class CommandJob(JobBaseProperties): # pylint: disable=too-many-instance-attributes
+ """Command job definition.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar description: The asset description text.
:vartype description: str
- :ivar created_on: The time at which the compute was created.
- :vartype created_on: ~datetime.datetime
- :ivar modified_on: The time at which the compute was last modified.
- :vartype modified_on: ~datetime.datetime
- :ivar resource_id: ARM resource id of the underlying compute.
- :vartype resource_id: str
- :ivar provisioning_errors: Errors during provisioning.
- :vartype provisioning_errors: list[~azure.mgmt.machinelearningservices.models.ErrorResponse]
- :ivar is_attached_compute: Indicating whether the compute was provisioned by user and brought
- from outside if true, or machine learning service provisioned it if false.
- :vartype is_attached_compute: bool
- :ivar disable_local_auth: Opt-out of local authentication and ensure customers can use only MSI
- and AAD exclusively for authentication.
- :vartype disable_local_auth: bool
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar component_id: ARM resource ID of the component resource.
+ :vartype component_id: str
+ :ivar compute_id: ARM resource ID of the compute resource.
+ :vartype compute_id: str
+ :ivar display_name: Display name of job.
+ :vartype display_name: str
+ :ivar experiment_name: The name of the experiment the job belongs to. If not set, the job is
+ placed in the "Default" experiment.
+ :vartype experiment_name: str
+ :ivar identity: Identity configuration. If set, this should be one of AmlToken,
+ ManagedIdentity, UserIdentity or null.
+ Defaults to AmlToken if null.
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
+ :ivar is_archived: Is the asset archived?
+ :vartype is_archived: bool
+ :ivar job_type: [Required] Specifies the type of job. Required. Known values are: "AutoML",
+ "Command", "Labeling", "Sweep", "Pipeline", and "Spark".
+ :vartype job_type: str or ~azure.mgmt.machinelearningservices.models.JobType
+ :ivar notification_setting: Notification setting for the job.
+ :vartype notification_setting: ~azure.mgmt.machinelearningservices.models.NotificationSetting
+ :ivar secrets_configuration: Configuration for secrets to be made available during runtime.
+ :vartype secrets_configuration: dict[str,
+ ~azure.mgmt.machinelearningservices.models.SecretConfiguration]
+ :ivar services: List of JobEndpoints.
+ For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
+ :vartype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
+ :ivar status: Status of the job. Known values are: "NotStarted", "Starting", "Provisioning",
+ "Preparing", "Queued", "Running", "Finalizing", "CancelRequested", "Completed", "Failed",
+ "Canceled", "NotResponding", "Paused", "Unknown", and "Scheduled".
+ :vartype status: str or ~azure.mgmt.machinelearningservices.models.JobStatus
+ :ivar autologger_settings: Autologger settings for the job.
+ :vartype autologger_settings: ~azure.mgmt.machinelearningservices.models.AutologgerSettings
+ :ivar code_id: ARM resource ID of the code asset.
+ :vartype code_id: str
+ :ivar command: [Required] The command to execute on startup of the job. eg. "python train.py".
+ Required.
+ :vartype command: str
+ :ivar distribution: Distribution configuration of the job. If set, this should be one of Mpi,
+ Tensorflow, PyTorch, Ray, or null.
+ :vartype distribution: ~azure.mgmt.machinelearningservices.models.DistributionConfiguration
+ :ivar environment_id: [Required] The ARM resource ID of the Environment specification for the
+ job. Required.
+ :vartype environment_id: str
+ :ivar environment_variables: Environment variables included in the job.
+ :vartype environment_variables: dict[str, str]
+ :ivar inputs: Mapping of input data bindings used in the job.
+ :vartype inputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
+ :ivar limits: Command Job limit.
+ :vartype limits: ~azure.mgmt.machinelearningservices.models.CommandJobLimits
+ :ivar outputs: Mapping of output data bindings used in the job.
+ :vartype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
+ :ivar parameters: Input parameters.
+ :vartype parameters: JSON
+ :ivar queue_settings: Queue settings for the job.
+ :vartype queue_settings: ~azure.mgmt.machinelearningservices.models.QueueSettings
+ :ivar resources: Compute Resource configuration for the job.
+ :vartype resources: ~azure.mgmt.machinelearningservices.models.JobResourceConfiguration
"""
_validation = {
- "compute_type": {"required": True},
- "provisioning_state": {"readonly": True},
- "created_on": {"readonly": True},
- "modified_on": {"readonly": True},
- "provisioning_errors": {"readonly": True},
- "is_attached_compute": {"readonly": True},
+ "job_type": {"required": True},
+ "status": {"readonly": True},
+ "command": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "environment_id": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "parameters": {"readonly": True},
}
_attribute_map = {
- "properties": {"key": "properties", "type": "ComputeInstanceProperties"},
- "compute_type": {"key": "computeType", "type": "str"},
- "compute_location": {"key": "computeLocation", "type": "str"},
- "provisioning_state": {"key": "provisioningState", "type": "str"},
"description": {"key": "description", "type": "str"},
- "created_on": {"key": "createdOn", "type": "iso-8601"},
- "modified_on": {"key": "modifiedOn", "type": "iso-8601"},
- "resource_id": {"key": "resourceId", "type": "str"},
- "provisioning_errors": {"key": "provisioningErrors", "type": "[ErrorResponse]"},
- "is_attached_compute": {"key": "isAttachedCompute", "type": "bool"},
- "disable_local_auth": {"key": "disableLocalAuth", "type": "bool"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "component_id": {"key": "componentId", "type": "str"},
+ "compute_id": {"key": "computeId", "type": "str"},
+ "display_name": {"key": "displayName", "type": "str"},
+ "experiment_name": {"key": "experimentName", "type": "str"},
+ "identity": {"key": "identity", "type": "IdentityConfiguration"},
+ "is_archived": {"key": "isArchived", "type": "bool"},
+ "job_type": {"key": "jobType", "type": "str"},
+ "notification_setting": {"key": "notificationSetting", "type": "NotificationSetting"},
+ "secrets_configuration": {"key": "secretsConfiguration", "type": "{SecretConfiguration}"},
+ "services": {"key": "services", "type": "{JobService}"},
+ "status": {"key": "status", "type": "str"},
+ "autologger_settings": {"key": "autologgerSettings", "type": "AutologgerSettings"},
+ "code_id": {"key": "codeId", "type": "str"},
+ "command": {"key": "command", "type": "str"},
+ "distribution": {"key": "distribution", "type": "DistributionConfiguration"},
+ "environment_id": {"key": "environmentId", "type": "str"},
+ "environment_variables": {"key": "environmentVariables", "type": "{str}"},
+ "inputs": {"key": "inputs", "type": "{JobInput}"},
+ "limits": {"key": "limits", "type": "CommandJobLimits"},
+ "outputs": {"key": "outputs", "type": "{JobOutput}"},
+ "parameters": {"key": "parameters", "type": "object"},
+ "queue_settings": {"key": "queueSettings", "type": "QueueSettings"},
+ "resources": {"key": "resources", "type": "JobResourceConfiguration"},
}
- def __init__(
+ def __init__( # pylint: disable=too-many-locals
self,
*,
- properties: Optional["_models.ComputeInstanceProperties"] = None,
- compute_location: Optional[str] = None,
+ command: str,
+ environment_id: str,
description: Optional[str] = None,
- resource_id: Optional[str] = None,
- disable_local_auth: Optional[bool] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ component_id: Optional[str] = None,
+ compute_id: Optional[str] = None,
+ display_name: Optional[str] = None,
+ experiment_name: str = "Default",
+ identity: Optional["_models.IdentityConfiguration"] = None,
+ is_archived: bool = False,
+ notification_setting: Optional["_models.NotificationSetting"] = None,
+ secrets_configuration: Optional[Dict[str, "_models.SecretConfiguration"]] = None,
+ services: Optional[Dict[str, "_models.JobService"]] = None,
+ autologger_settings: Optional["_models.AutologgerSettings"] = None,
+ code_id: Optional[str] = None,
+ distribution: Optional["_models.DistributionConfiguration"] = None,
+ environment_variables: Optional[Dict[str, str]] = None,
+ inputs: Optional[Dict[str, "_models.JobInput"]] = None,
+ limits: Optional["_models.CommandJobLimits"] = None,
+ outputs: Optional[Dict[str, "_models.JobOutput"]] = None,
+ queue_settings: Optional["_models.QueueSettings"] = None,
+ resources: Optional["_models.JobResourceConfiguration"] = None,
**kwargs: Any
) -> None:
"""
- :keyword properties: Properties of ComputeInstance.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.ComputeInstanceProperties
- :keyword compute_location: Location for the underlying compute.
- :paramtype compute_location: str
- :keyword description: The description of the Machine Learning compute.
+ :keyword description: The asset description text.
:paramtype description: str
- :keyword resource_id: ARM resource id of the underlying compute.
- :paramtype resource_id: str
- :keyword disable_local_auth: Opt-out of local authentication and ensure customers can use only
- MSI and AAD exclusively for authentication.
- :paramtype disable_local_auth: bool
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword component_id: ARM resource ID of the component resource.
+ :paramtype component_id: str
+ :keyword compute_id: ARM resource ID of the compute resource.
+ :paramtype compute_id: str
+ :keyword display_name: Display name of job.
+ :paramtype display_name: str
+ :keyword experiment_name: The name of the experiment the job belongs to. If not set, the job is
+ placed in the "Default" experiment.
+ :paramtype experiment_name: str
+ :keyword identity: Identity configuration. If set, this should be one of AmlToken,
+ ManagedIdentity, UserIdentity or null.
+ Defaults to AmlToken if null.
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
+ :keyword is_archived: Is the asset archived?
+ :paramtype is_archived: bool
+ :keyword notification_setting: Notification setting for the job.
+ :paramtype notification_setting: ~azure.mgmt.machinelearningservices.models.NotificationSetting
+ :keyword secrets_configuration: Configuration for secrets to be made available during runtime.
+ :paramtype secrets_configuration: dict[str,
+ ~azure.mgmt.machinelearningservices.models.SecretConfiguration]
+ :keyword services: List of JobEndpoints.
+ For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
+ :paramtype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
+ :keyword autologger_settings: Autologger settings for the job (for example, whether the MLflow
+ autologger is enabled).
+ :paramtype autologger_settings: ~azure.mgmt.machinelearningservices.models.AutologgerSettings
+ :keyword code_id: ARM resource ID of the code asset.
+ :paramtype code_id: str
+ :keyword command: [Required] The command to execute on startup of the job, e.g. "python
+ train.py". Required.
+ :paramtype command: str
+ :keyword distribution: Distribution configuration of the job. If set, this should be one of
+ Mpi, Tensorflow, PyTorch, Ray, or null.
+ :paramtype distribution: ~azure.mgmt.machinelearningservices.models.DistributionConfiguration
+ :keyword environment_id: [Required] The ARM resource ID of the Environment specification for
+ the job. Required.
+ :paramtype environment_id: str
+ :keyword environment_variables: Environment variables included in the job.
+ :paramtype environment_variables: dict[str, str]
+ :keyword inputs: Mapping of input data bindings used in the job.
+ :paramtype inputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
+ :keyword limits: Command Job limit.
+ :paramtype limits: ~azure.mgmt.machinelearningservices.models.CommandJobLimits
+ :keyword outputs: Mapping of output data bindings used in the job.
+ :paramtype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
+ :keyword queue_settings: Queue settings for the job.
+ :paramtype queue_settings: ~azure.mgmt.machinelearningservices.models.QueueSettings
+ :keyword resources: Compute Resource configuration for the job.
+ :paramtype resources: ~azure.mgmt.machinelearningservices.models.JobResourceConfiguration
"""
super().__init__(
- compute_location=compute_location,
description=description,
- resource_id=resource_id,
- disable_local_auth=disable_local_auth,
properties=properties,
+ tags=tags,
+ component_id=component_id,
+ compute_id=compute_id,
+ display_name=display_name,
+ experiment_name=experiment_name,
+ identity=identity,
+ is_archived=is_archived,
+ notification_setting=notification_setting,
+ secrets_configuration=secrets_configuration,
+ services=services,
**kwargs
)
- self.properties = properties
- self.compute_type: str = "ComputeInstance"
- self.compute_location = compute_location
- self.provisioning_state = None
- self.description = description
- self.created_on = None
- self.modified_on = None
- self.resource_id = resource_id
- self.provisioning_errors = None
- self.is_attached_compute = None
- self.disable_local_auth = disable_local_auth
+ self.job_type: str = "Command"
+ self.autologger_settings = autologger_settings
+ self.code_id = code_id
+ self.command = command
+ self.distribution = distribution
+ self.environment_id = environment_id
+ self.environment_variables = environment_variables
+ self.inputs = inputs
+ self.limits = limits
+ self.outputs = outputs
+ self.parameters = None
+ self.queue_settings = queue_settings
+ self.resources = resources
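# A minimal sketch of constructing the CommandJob payload generated above, using only the
# constructors that appear in this diff. The resource IDs, command string, and environment
# variable are illustrative placeholders, not values taken from the spec.
import datetime

from azure.mgmt.machinelearningservices import models

job = models.CommandJob(
    command="python train.py --epochs 10",  # [Required] startup command (placeholder)
    environment_id="/subscriptions/.../environments/my-env/versions/1",  # [Required] placeholder ARM id
    experiment_name="sample-experiment",
    compute_id="/subscriptions/.../computes/cpu-cluster",  # placeholder ARM id
    environment_variables={"EXAMPLE_FLAG": "1"},
    limits=models.CommandJobLimits(timeout=datetime.timedelta(hours=2)),
)
# job.job_type is fixed to "Command" by the subclass constructor.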
-class ComputeInstanceApplication(_serialization.Model):
- """Defines an Aml Instance application and its connectivity endpoint URI.
+class JobLimits(_serialization.Model):
+ """JobLimits.
- :ivar display_name: Name of the ComputeInstance application.
- :vartype display_name: str
- :ivar endpoint_uri: Application' endpoint URI.
- :vartype endpoint_uri: str
- """
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ CommandJobLimits, SweepJobLimits
- _attribute_map = {
- "display_name": {"key": "displayName", "type": "str"},
- "endpoint_uri": {"key": "endpointUri", "type": "str"},
- }
+ All required parameters must be populated in order to send to Azure.
- def __init__(
- self, *, display_name: Optional[str] = None, endpoint_uri: Optional[str] = None, **kwargs: Any
- ) -> None:
- """
- :keyword display_name: Name of the ComputeInstance application.
- :paramtype display_name: str
- :keyword endpoint_uri: Application' endpoint URI.
- :paramtype endpoint_uri: str
+ :ivar job_limits_type: [Required] JobLimit type. Required. Known values are: "Command" and
+ "Sweep".
+ :vartype job_limits_type: str or ~azure.mgmt.machinelearningservices.models.JobLimitsType
+ :ivar timeout: The max run duration in ISO 8601 format, after which the job will be cancelled.
+ Only supports duration with precision as low as Seconds.
+ :vartype timeout: ~datetime.timedelta
+ """
+
+ _validation = {
+ "job_limits_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "job_limits_type": {"key": "jobLimitsType", "type": "str"},
+ "timeout": {"key": "timeout", "type": "duration"},
+ }
+
+ _subtype_map = {"job_limits_type": {"Command": "CommandJobLimits", "Sweep": "SweepJobLimits"}}
+
+ def __init__(self, *, timeout: Optional[datetime.timedelta] = None, **kwargs: Any) -> None:
+ """
+ :keyword timeout: The max run duration in ISO 8601 format, after which the job will be
+ cancelled. Only supports duration with precision as low as Seconds.
+ :paramtype timeout: ~datetime.timedelta
"""
super().__init__(**kwargs)
- self.display_name = display_name
- self.endpoint_uri = endpoint_uri
+ self.job_limits_type: Optional[str] = None
+ self.timeout = timeout
-class ComputeInstanceConnectivityEndpoints(_serialization.Model):
- """Defines all connectivity endpoints and properties for an ComputeInstance.
+class CommandJobLimits(JobLimits):
+ """Command Job limit class.
- Variables are only populated by the server, and will be ignored when sending a request.
+ All required parameters must be populated in order to send to Azure.
- :ivar public_ip_address: Public IP Address of this ComputeInstance.
- :vartype public_ip_address: str
- :ivar private_ip_address: Private IP Address of this ComputeInstance (local to the VNET in
- which the compute instance is deployed).
- :vartype private_ip_address: str
+ :ivar job_limits_type: [Required] JobLimit type. Required. Known values are: "Command" and
+ "Sweep".
+ :vartype job_limits_type: str or ~azure.mgmt.machinelearningservices.models.JobLimitsType
+ :ivar timeout: The max run duration in ISO 8601 format, after which the job will be cancelled.
+ Only supports duration with precision as low as Seconds.
+ :vartype timeout: ~datetime.timedelta
"""
_validation = {
- "public_ip_address": {"readonly": True},
- "private_ip_address": {"readonly": True},
+ "job_limits_type": {"required": True},
}
_attribute_map = {
- "public_ip_address": {"key": "publicIpAddress", "type": "str"},
- "private_ip_address": {"key": "privateIpAddress", "type": "str"},
+ "job_limits_type": {"key": "jobLimitsType", "type": "str"},
+ "timeout": {"key": "timeout", "type": "duration"},
}
- def __init__(self, **kwargs: Any) -> None:
- """ """
+ def __init__(self, *, timeout: Optional[datetime.timedelta] = None, **kwargs: Any) -> None:
+ """
+ :keyword timeout: The max run duration in ISO 8601 format, after which the job will be
+ cancelled. Only supports duration with precision as low as Seconds.
+ :paramtype timeout: ~datetime.timedelta
+ """
+ super().__init__(timeout=timeout, **kwargs)
+ self.job_limits_type: str = "Command"
+
+
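# JobLimits carries a _subtype_map, so the generated (de)serializer can pick the concrete
# subclass from the jobLimitsType discriminator. A small sketch, assuming the vendored
# _serialization.Model.deserialize classmethod behaves as in other generated mgmt packages.
from azure.mgmt.machinelearningservices import models

wire_payload = {"jobLimitsType": "Command", "timeout": "PT30M"}  # ISO 8601 duration on the wire
limits = models.JobLimits.deserialize(wire_payload)

assert isinstance(limits, models.CommandJobLimits)
print(limits.timeout)  # datetime.timedelta(seconds=1800)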
+class ComponentConfiguration(_serialization.Model):
+ """Used for sweep over component.
+
+ :ivar pipeline_settings: Pipeline settings, for things like ContinueRunOnStepFailure etc.
+ :vartype pipeline_settings: JSON
+ """
+
+ _attribute_map = {
+ "pipeline_settings": {"key": "pipelineSettings", "type": "object"},
+ }
+
+ def __init__(self, *, pipeline_settings: Optional[JSON] = None, **kwargs: Any) -> None:
+ """
+ :keyword pipeline_settings: Pipeline settings, for things like ContinueRunOnStepFailure etc.
+ :paramtype pipeline_settings: JSON
+ """
super().__init__(**kwargs)
- self.public_ip_address = None
- self.private_ip_address = None
+ self.pipeline_settings = pipeline_settings
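# pipeline_settings is free-form JSON ("object" on the wire), so any JSON-serializable dict is
# accepted; the ContinueRunOnStepFailure key below comes straight from the docstring above and
# is only illustrative.
from azure.mgmt.machinelearningservices import models

sweep_component_config = models.ComponentConfiguration(
    pipeline_settings={"ContinueRunOnStepFailure": True}
)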
-class ComputeInstanceContainer(_serialization.Model):
- """Defines an Aml Instance container.
+class ComponentContainer(ProxyResource):
+ """Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
- :ivar name: Name of the ComputeInstance container.
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ :vartype id: str
+ :ivar name: The name of the resource.
:vartype name: str
- :ivar autosave: Auto save settings. Known values are: "None", "Local", and "Remote".
- :vartype autosave: str or ~azure.mgmt.machinelearningservices.models.Autosave
- :ivar gpu: Information of GPU.
- :vartype gpu: str
- :ivar network: network of this container. Known values are: "Bridge" and "Host".
- :vartype network: str or ~azure.mgmt.machinelearningservices.models.Network
- :ivar environment: Environment information of this container.
- :vartype environment: ~azure.mgmt.machinelearningservices.models.ComputeInstanceEnvironmentInfo
- :ivar services: services of this containers.
- :vartype services: list[JSON]
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.ComponentContainerProperties
"""
_validation = {
- "services": {"readonly": True},
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "properties": {"required": True},
}
_attribute_map = {
+ "id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
- "autosave": {"key": "autosave", "type": "str"},
- "gpu": {"key": "gpu", "type": "str"},
- "network": {"key": "network", "type": "str"},
- "environment": {"key": "environment", "type": "ComputeInstanceEnvironmentInfo"},
- "services": {"key": "services", "type": "[object]"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "properties": {"key": "properties", "type": "ComponentContainerProperties"},
}
- def __init__(
- self,
- *,
- name: Optional[str] = None,
- autosave: Optional[Union[str, "_models.Autosave"]] = None,
- gpu: Optional[str] = None,
- network: Optional[Union[str, "_models.Network"]] = None,
- environment: Optional["_models.ComputeInstanceEnvironmentInfo"] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, properties: "_models.ComponentContainerProperties", **kwargs: Any) -> None:
"""
- :keyword name: Name of the ComputeInstance container.
- :paramtype name: str
- :keyword autosave: Auto save settings. Known values are: "None", "Local", and "Remote".
- :paramtype autosave: str or ~azure.mgmt.machinelearningservices.models.Autosave
- :keyword gpu: Information of GPU.
- :paramtype gpu: str
- :keyword network: network of this container. Known values are: "Bridge" and "Host".
- :paramtype network: str or ~azure.mgmt.machinelearningservices.models.Network
- :keyword environment: Environment information of this container.
- :paramtype environment:
- ~azure.mgmt.machinelearningservices.models.ComputeInstanceEnvironmentInfo
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.ComponentContainerProperties
"""
super().__init__(**kwargs)
- self.name = name
- self.autosave = autosave
- self.gpu = gpu
- self.network = network
- self.environment = environment
- self.services = None
+ self.properties = properties
-class ComputeInstanceCreatedBy(_serialization.Model):
- """Describes information on user who created this ComputeInstance.
+class ComponentContainerProperties(AssetContainer):
+ """Component container definition.
+
+
+ .. raw:: html
+
+ .
Variables are only populated by the server, and will be ignored when sending a request.
- :ivar user_name: Name of the user.
- :vartype user_name: str
- :ivar user_org_id: Uniquely identifies user' Azure Active Directory organization.
- :vartype user_org_id: str
- :ivar user_id: Uniquely identifies the user within his/her organization.
- :vartype user_id: str
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar is_archived: Is the asset archived?
+ :vartype is_archived: bool
+ :ivar latest_version: The latest version inside this container.
+ :vartype latest_version: str
+ :ivar next_version: The next auto incremental version.
+ :vartype next_version: str
+ :ivar provisioning_state: Provisioning state for the component container. Known values are:
+ "Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.AssetProvisioningState
"""
_validation = {
- "user_name": {"readonly": True},
- "user_org_id": {"readonly": True},
- "user_id": {"readonly": True},
+ "latest_version": {"readonly": True},
+ "next_version": {"readonly": True},
+ "provisioning_state": {"readonly": True},
}
_attribute_map = {
- "user_name": {"key": "userName", "type": "str"},
- "user_org_id": {"key": "userOrgId", "type": "str"},
- "user_id": {"key": "userId", "type": "str"},
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "is_archived": {"key": "isArchived", "type": "bool"},
+ "latest_version": {"key": "latestVersion", "type": "str"},
+ "next_version": {"key": "nextVersion", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
}
- def __init__(self, **kwargs: Any) -> None:
- """ """
- super().__init__(**kwargs)
- self.user_name = None
- self.user_org_id = None
- self.user_id = None
+ def __init__(
+ self,
+ *,
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ is_archived: bool = False,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword is_archived: Is the asset archived?
+ :paramtype is_archived: bool
+ """
+ super().__init__(description=description, properties=properties, tags=tags, is_archived=is_archived, **kwargs)
+ self.provisioning_state = None
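# ComponentContainer is the ARM envelope and requires a ComponentContainerProperties payload;
# latest_version, next_version, and provisioning_state are read-only and populated by the
# service. A construction sketch with placeholder values; it would typically be sent through
# the component_containers operations on the management client.
from azure.mgmt.machinelearningservices import models

container = models.ComponentContainer(
    properties=models.ComponentContainerProperties(
        description="Training pipeline components",
        tags={"team": "ml-platform"},
        is_archived=False,
    )
)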
-class ComputeInstanceDataDisk(_serialization.Model):
- """Defines an Aml Instance DataDisk.
+class ComponentContainerResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of ComponentContainer entities.
- :ivar caching: Caching type of Data Disk. Known values are: "None", "ReadOnly", and
- "ReadWrite".
- :vartype caching: str or ~azure.mgmt.machinelearningservices.models.Caching
- :ivar disk_size_gb: The initial disk size in gigabytes.
- :vartype disk_size_gb: int
- :ivar lun: The lun is used to uniquely identify each data disk. If attaching multiple disks,
- each should have a distinct lun.
- :vartype lun: int
- :ivar storage_account_type: type of this storage account. Known values are: "Standard_LRS" and
- "Premium_LRS".
- :vartype storage_account_type: str or
- ~azure.mgmt.machinelearningservices.models.StorageAccountType
+ :ivar next_link: The link to the next page of ComponentContainer objects. If null, there are no
+ additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type ComponentContainer.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.ComponentContainer]
"""
_attribute_map = {
- "caching": {"key": "caching", "type": "str"},
- "disk_size_gb": {"key": "diskSizeGB", "type": "int"},
- "lun": {"key": "lun", "type": "int"},
- "storage_account_type": {"key": "storageAccountType", "type": "str"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[ComponentContainer]"},
}
def __init__(
self,
*,
- caching: Optional[Union[str, "_models.Caching"]] = None,
- disk_size_gb: Optional[int] = None,
- lun: Optional[int] = None,
- storage_account_type: Union[str, "_models.StorageAccountType"] = "Standard_LRS",
+ next_link: Optional[str] = None,
+ value: Optional[List["_models.ComponentContainer"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword caching: Caching type of Data Disk. Known values are: "None", "ReadOnly", and
- "ReadWrite".
- :paramtype caching: str or ~azure.mgmt.machinelearningservices.models.Caching
- :keyword disk_size_gb: The initial disk size in gigabytes.
- :paramtype disk_size_gb: int
- :keyword lun: The lun is used to uniquely identify each data disk. If attaching multiple disks,
- each should have a distinct lun.
- :paramtype lun: int
- :keyword storage_account_type: type of this storage account. Known values are: "Standard_LRS"
- and "Premium_LRS".
- :paramtype storage_account_type: str or
- ~azure.mgmt.machinelearningservices.models.StorageAccountType
+ :keyword next_link: The link to the next page of ComponentContainer objects. If null, there are
+ no additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type ComponentContainer.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.ComponentContainer]
"""
super().__init__(**kwargs)
- self.caching = caching
- self.disk_size_gb = disk_size_gb
- self.lun = lun
- self.storage_account_type = storage_account_type
-
-
-class ComputeInstanceDataMount(_serialization.Model):
- """Defines an Aml Instance DataMount.
-
- :ivar source: Source of the ComputeInstance data mount.
- :vartype source: str
- :ivar source_type: Data source type. Known values are: "Dataset", "Datastore", and "URI".
- :vartype source_type: str or ~azure.mgmt.machinelearningservices.models.SourceType
- :ivar mount_name: name of the ComputeInstance data mount.
- :vartype mount_name: str
- :ivar mount_action: Mount Action. Known values are: "Mount" and "Unmount".
- :vartype mount_action: str or ~azure.mgmt.machinelearningservices.models.MountAction
- :ivar created_by: who this data mount created by.
- :vartype created_by: str
- :ivar mount_path: Path of this data mount.
- :vartype mount_path: str
- :ivar mount_state: Mount state. Known values are: "MountRequested", "Mounted", "MountFailed",
- "UnmountRequested", "UnmountFailed", and "Unmounted".
- :vartype mount_state: str or ~azure.mgmt.machinelearningservices.models.MountState
- :ivar mounted_on: The time when the disk mounted.
- :vartype mounted_on: ~datetime.datetime
- :ivar error: Error of this data mount.
- :vartype error: str
- """
+ self.next_link = next_link
+ self.value = value
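# The *ResourceArmPaginatedResult models represent a single page (value) plus a nextLink. When
# calling through the client, the returned pager walks nextLink automatically; the list
# operation name below is assumed from the usual generated pattern, and the credential comes
# from the separate azure-identity package.
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")
for container in client.component_containers.list("<resource-group>", "<workspace-name>"):
    print(container.name, container.properties.latest_version)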
- _attribute_map = {
- "source": {"key": "source", "type": "str"},
- "source_type": {"key": "sourceType", "type": "str"},
- "mount_name": {"key": "mountName", "type": "str"},
- "mount_action": {"key": "mountAction", "type": "str"},
- "created_by": {"key": "createdBy", "type": "str"},
- "mount_path": {"key": "mountPath", "type": "str"},
- "mount_state": {"key": "mountState", "type": "str"},
- "mounted_on": {"key": "mountedOn", "type": "iso-8601"},
- "error": {"key": "error", "type": "str"},
- }
- def __init__(
- self,
- *,
- source: Optional[str] = None,
- source_type: Optional[Union[str, "_models.SourceType"]] = None,
- mount_name: Optional[str] = None,
- mount_action: Optional[Union[str, "_models.MountAction"]] = None,
- created_by: Optional[str] = None,
- mount_path: Optional[str] = None,
- mount_state: Optional[Union[str, "_models.MountState"]] = None,
- mounted_on: Optional[datetime.datetime] = None,
- error: Optional[str] = None,
- **kwargs: Any
- ) -> None:
- """
- :keyword source: Source of the ComputeInstance data mount.
- :paramtype source: str
- :keyword source_type: Data source type. Known values are: "Dataset", "Datastore", and "URI".
- :paramtype source_type: str or ~azure.mgmt.machinelearningservices.models.SourceType
- :keyword mount_name: name of the ComputeInstance data mount.
- :paramtype mount_name: str
- :keyword mount_action: Mount Action. Known values are: "Mount" and "Unmount".
- :paramtype mount_action: str or ~azure.mgmt.machinelearningservices.models.MountAction
- :keyword created_by: who this data mount created by.
- :paramtype created_by: str
- :keyword mount_path: Path of this data mount.
- :paramtype mount_path: str
- :keyword mount_state: Mount state. Known values are: "MountRequested", "Mounted",
- "MountFailed", "UnmountRequested", "UnmountFailed", and "Unmounted".
- :paramtype mount_state: str or ~azure.mgmt.machinelearningservices.models.MountState
- :keyword mounted_on: The time when the disk mounted.
- :paramtype mounted_on: ~datetime.datetime
- :keyword error: Error of this data mount.
- :paramtype error: str
- """
- super().__init__(**kwargs)
- self.source = source
- self.source_type = source_type
- self.mount_name = mount_name
- self.mount_action = mount_action
- self.created_by = created_by
- self.mount_path = mount_path
- self.mount_state = mount_state
- self.mounted_on = mounted_on
- self.error = error
+class ComponentVersion(ProxyResource):
+ """Azure Resource Manager resource envelope.
+ Variables are only populated by the server, and will be ignored when sending a request.
-class ComputeInstanceEnvironmentInfo(_serialization.Model):
- """Environment information.
+ All required parameters must be populated in order to send to Azure.
- :ivar name: name of environment.
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ :vartype id: str
+ :ivar name: The name of the resource.
:vartype name: str
- :ivar version: version of environment.
- :vartype version: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.ComponentVersionProperties
"""
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "properties": {"required": True},
+ }
+
_attribute_map = {
+ "id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
- "version": {"key": "version", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "properties": {"key": "properties", "type": "ComponentVersionProperties"},
}
- def __init__(self, *, name: Optional[str] = None, version: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(self, *, properties: "_models.ComponentVersionProperties", **kwargs: Any) -> None:
"""
- :keyword name: name of environment.
- :paramtype name: str
- :keyword version: version of environment.
- :paramtype version: str
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.ComponentVersionProperties
"""
super().__init__(**kwargs)
- self.name = name
- self.version = version
+ self.properties = properties
-class ComputeInstanceLastOperation(_serialization.Model):
- """The last operation on ComputeInstance.
+class ComponentVersionProperties(AssetBase):
+ """Definition of a component version: defines resources that span component types.
- :ivar operation_name: Name of the last operation. Known values are: "Create", "Start", "Stop",
- "Restart", "Reimage", and "Delete".
- :vartype operation_name: str or ~azure.mgmt.machinelearningservices.models.OperationName
- :ivar operation_time: Time of the last operation.
- :vartype operation_time: ~datetime.datetime
- :ivar operation_status: Operation status. Known values are: "InProgress", "Succeeded",
- "CreateFailed", "StartFailed", "StopFailed", "RestartFailed", "ReimageFailed", and
- "DeleteFailed".
- :vartype operation_status: str or ~azure.mgmt.machinelearningservices.models.OperationStatus
- :ivar operation_trigger: Trigger of operation. Known values are: "User", "Schedule", and
- "IdleShutdown".
- :vartype operation_trigger: str or ~azure.mgmt.machinelearningservices.models.OperationTrigger
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar auto_delete_setting: Specifies the lifecycle setting of managed data asset.
+ :vartype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :ivar is_anonymous: If the name and version are system generated (anonymous registration). For
+ types where Stage is defined, when Stage is provided it will be used to populate IsAnonymous.
+ :vartype is_anonymous: bool
+ :ivar is_archived: Is the asset archived? For types where Stage is defined, when Stage is
+ provided it will be used to populate IsArchived.
+ :vartype is_archived: bool
+ :ivar component_spec: Defines Component definition details.
+
+
+ .. raw:: html
+
+ .
+ :vartype component_spec: JSON
+ :ivar provisioning_state: Provisioning state for the component version. Known values are:
+ "Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.AssetProvisioningState
+ :ivar stage: Stage in the component lifecycle.
+ :vartype stage: str
"""
+ _validation = {
+ "provisioning_state": {"readonly": True},
+ }
+
_attribute_map = {
- "operation_name": {"key": "operationName", "type": "str"},
- "operation_time": {"key": "operationTime", "type": "iso-8601"},
- "operation_status": {"key": "operationStatus", "type": "str"},
- "operation_trigger": {"key": "operationTrigger", "type": "str"},
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "auto_delete_setting": {"key": "autoDeleteSetting", "type": "AutoDeleteSetting"},
+ "is_anonymous": {"key": "isAnonymous", "type": "bool"},
+ "is_archived": {"key": "isArchived", "type": "bool"},
+ "component_spec": {"key": "componentSpec", "type": "object"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "stage": {"key": "stage", "type": "str"},
}
def __init__(
self,
*,
- operation_name: Optional[Union[str, "_models.OperationName"]] = None,
- operation_time: Optional[datetime.datetime] = None,
- operation_status: Optional[Union[str, "_models.OperationStatus"]] = None,
- operation_trigger: Optional[Union[str, "_models.OperationTrigger"]] = None,
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ auto_delete_setting: Optional["_models.AutoDeleteSetting"] = None,
+ is_anonymous: bool = False,
+ is_archived: bool = False,
+ component_spec: Optional[JSON] = None,
+ stage: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword operation_name: Name of the last operation. Known values are: "Create", "Start",
- "Stop", "Restart", "Reimage", and "Delete".
- :paramtype operation_name: str or ~azure.mgmt.machinelearningservices.models.OperationName
- :keyword operation_time: Time of the last operation.
- :paramtype operation_time: ~datetime.datetime
- :keyword operation_status: Operation status. Known values are: "InProgress", "Succeeded",
- "CreateFailed", "StartFailed", "StopFailed", "RestartFailed", "ReimageFailed", and
- "DeleteFailed".
- :paramtype operation_status: str or ~azure.mgmt.machinelearningservices.models.OperationStatus
- :keyword operation_trigger: Trigger of operation. Known values are: "User", "Schedule", and
- "IdleShutdown".
- :paramtype operation_trigger: str or
- ~azure.mgmt.machinelearningservices.models.OperationTrigger
- """
- super().__init__(**kwargs)
- self.operation_name = operation_name
- self.operation_time = operation_time
- self.operation_status = operation_status
- self.operation_trigger = operation_trigger
-
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword auto_delete_setting: Specifies the lifecycle setting of managed data asset.
+ :paramtype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :keyword is_anonymous: If the name and version are system generated (anonymous registration). For
+ types where Stage is defined, when Stage is provided it will be used to populate IsAnonymous.
+ :paramtype is_anonymous: bool
+ :keyword is_archived: Is the asset archived? For types where Stage is defined, when Stage is
+ provided it will be used to populate IsArchived.
+ :paramtype is_archived: bool
+ :keyword component_spec: Defines Component definition details.
-class ComputeInstanceProperties(_serialization.Model): # pylint: disable=too-many-instance-attributes
- """Compute Instance properties.
- Variables are only populated by the server, and will be ignored when sending a request.
+ .. raw:: html
- :ivar vm_size: Virtual Machine Size.
- :vartype vm_size: str
- :ivar subnet: Virtual network subnet resource ID the compute nodes belong to.
- :vartype subnet: ~azure.mgmt.machinelearningservices.models.ResourceId
- :ivar application_sharing_policy: Policy for sharing applications on this compute instance
- among users of parent workspace. If Personal, only the creator can access applications on this
- compute instance. When Shared, any workspace user can access applications on this instance
- depending on his/her assigned role. Known values are: "Personal" and "Shared".
- :vartype application_sharing_policy: str or
- ~azure.mgmt.machinelearningservices.models.ApplicationSharingPolicy
- :ivar ssh_settings: Specifies policy and settings for SSH access.
- :vartype ssh_settings: ~azure.mgmt.machinelearningservices.models.ComputeInstanceSshSettings
- :ivar custom_services: List of Custom Services added to the compute.
- :vartype custom_services: list[~azure.mgmt.machinelearningservices.models.CustomService]
- :ivar os_image_metadata: Returns metadata about the operating system image for this compute
- instance.
- :vartype os_image_metadata: ~azure.mgmt.machinelearningservices.models.ImageMetadata
- :ivar connectivity_endpoints: Describes all connectivity endpoints available for this
- ComputeInstance.
- :vartype connectivity_endpoints:
- ~azure.mgmt.machinelearningservices.models.ComputeInstanceConnectivityEndpoints
- :ivar applications: Describes available applications and their endpoints on this
- ComputeInstance.
- :vartype applications:
- list[~azure.mgmt.machinelearningservices.models.ComputeInstanceApplication]
- :ivar created_by: Describes information on user who created this ComputeInstance.
- :vartype created_by: ~azure.mgmt.machinelearningservices.models.ComputeInstanceCreatedBy
- :ivar errors: Collection of errors encountered on this ComputeInstance.
- :vartype errors: list[~azure.mgmt.machinelearningservices.models.ErrorResponse]
- :ivar state: The current state of this ComputeInstance. Known values are: "Creating",
- "CreateFailed", "Deleting", "Running", "Restarting", "JobRunning", "SettingUp", "SetupFailed",
- "Starting", "Stopped", "Stopping", "UserSettingUp", "UserSetupFailed", "Unknown", and
- "Unusable".
- :vartype state: str or ~azure.mgmt.machinelearningservices.models.ComputeInstanceState
- :ivar compute_instance_authorization_type: The Compute Instance Authorization type. Available
- values are personal (default). "personal"
- :vartype compute_instance_authorization_type: str or
- ~azure.mgmt.machinelearningservices.models.ComputeInstanceAuthorizationType
- :ivar personal_compute_instance_settings: Settings for a personal compute instance.
- :vartype personal_compute_instance_settings:
- ~azure.mgmt.machinelearningservices.models.PersonalComputeInstanceSettings
- :ivar setup_scripts: Details of customized scripts to execute for setting up the cluster.
- :vartype setup_scripts: ~azure.mgmt.machinelearningservices.models.SetupScripts
- :ivar last_operation: The last operation on ComputeInstance.
- :vartype last_operation:
- ~azure.mgmt.machinelearningservices.models.ComputeInstanceLastOperation
- :ivar schedules: The list of schedules to be applied on the computes.
- :vartype schedules: ~azure.mgmt.machinelearningservices.models.ComputeSchedules
- :ivar enable_node_public_ip: Enable or disable node public IP address provisioning. Possible
- values are: Possible values are: true - Indicates that the compute nodes will have public IPs
- provisioned. false - Indicates that the compute nodes will have a private endpoint and no
- public IPs.
- :vartype enable_node_public_ip: bool
- :ivar containers: Describes informations of containers on this ComputeInstance.
- :vartype containers: list[~azure.mgmt.machinelearningservices.models.ComputeInstanceContainer]
- :ivar data_disks: Describes informations of dataDisks on this ComputeInstance.
- :vartype data_disks: list[~azure.mgmt.machinelearningservices.models.ComputeInstanceDataDisk]
- :ivar data_mounts: Describes informations of dataMounts on this ComputeInstance.
- :vartype data_mounts: list[~azure.mgmt.machinelearningservices.models.ComputeInstanceDataMount]
- :ivar versions: ComputeInstance version.
- :vartype versions: ~azure.mgmt.machinelearningservices.models.ComputeInstanceVersion
- """
-
- _validation = {
- "os_image_metadata": {"readonly": True},
- "connectivity_endpoints": {"readonly": True},
- "applications": {"readonly": True},
- "created_by": {"readonly": True},
- "errors": {"readonly": True},
- "state": {"readonly": True},
- "last_operation": {"readonly": True},
- "containers": {"readonly": True},
- "data_disks": {"readonly": True},
- "data_mounts": {"readonly": True},
- "versions": {"readonly": True},
- }
-
- _attribute_map = {
- "vm_size": {"key": "vmSize", "type": "str"},
- "subnet": {"key": "subnet", "type": "ResourceId"},
- "application_sharing_policy": {"key": "applicationSharingPolicy", "type": "str"},
- "ssh_settings": {"key": "sshSettings", "type": "ComputeInstanceSshSettings"},
- "custom_services": {"key": "customServices", "type": "[CustomService]"},
- "os_image_metadata": {"key": "osImageMetadata", "type": "ImageMetadata"},
- "connectivity_endpoints": {"key": "connectivityEndpoints", "type": "ComputeInstanceConnectivityEndpoints"},
- "applications": {"key": "applications", "type": "[ComputeInstanceApplication]"},
- "created_by": {"key": "createdBy", "type": "ComputeInstanceCreatedBy"},
- "errors": {"key": "errors", "type": "[ErrorResponse]"},
- "state": {"key": "state", "type": "str"},
- "compute_instance_authorization_type": {"key": "computeInstanceAuthorizationType", "type": "str"},
- "personal_compute_instance_settings": {
- "key": "personalComputeInstanceSettings",
- "type": "PersonalComputeInstanceSettings",
- },
- "setup_scripts": {"key": "setupScripts", "type": "SetupScripts"},
- "last_operation": {"key": "lastOperation", "type": "ComputeInstanceLastOperation"},
- "schedules": {"key": "schedules", "type": "ComputeSchedules"},
- "enable_node_public_ip": {"key": "enableNodePublicIp", "type": "bool"},
- "containers": {"key": "containers", "type": "[ComputeInstanceContainer]"},
- "data_disks": {"key": "dataDisks", "type": "[ComputeInstanceDataDisk]"},
- "data_mounts": {"key": "dataMounts", "type": "[ComputeInstanceDataMount]"},
- "versions": {"key": "versions", "type": "ComputeInstanceVersion"},
- }
-
- def __init__(
- self,
- *,
- vm_size: Optional[str] = None,
- subnet: Optional["_models.ResourceId"] = None,
- application_sharing_policy: Union[str, "_models.ApplicationSharingPolicy"] = "Shared",
- ssh_settings: Optional["_models.ComputeInstanceSshSettings"] = None,
- custom_services: Optional[List["_models.CustomService"]] = None,
- compute_instance_authorization_type: Union[str, "_models.ComputeInstanceAuthorizationType"] = "personal",
- personal_compute_instance_settings: Optional["_models.PersonalComputeInstanceSettings"] = None,
- setup_scripts: Optional["_models.SetupScripts"] = None,
- schedules: Optional["_models.ComputeSchedules"] = None,
- enable_node_public_ip: Optional[bool] = None,
- **kwargs: Any
- ) -> None:
- """
- :keyword vm_size: Virtual Machine Size.
- :paramtype vm_size: str
- :keyword subnet: Virtual network subnet resource ID the compute nodes belong to.
- :paramtype subnet: ~azure.mgmt.machinelearningservices.models.ResourceId
- :keyword application_sharing_policy: Policy for sharing applications on this compute instance
- among users of parent workspace. If Personal, only the creator can access applications on this
- compute instance. When Shared, any workspace user can access applications on this instance
- depending on his/her assigned role. Known values are: "Personal" and "Shared".
- :paramtype application_sharing_policy: str or
- ~azure.mgmt.machinelearningservices.models.ApplicationSharingPolicy
- :keyword ssh_settings: Specifies policy and settings for SSH access.
- :paramtype ssh_settings: ~azure.mgmt.machinelearningservices.models.ComputeInstanceSshSettings
- :keyword custom_services: List of Custom Services added to the compute.
- :paramtype custom_services: list[~azure.mgmt.machinelearningservices.models.CustomService]
- :keyword compute_instance_authorization_type: The Compute Instance Authorization type.
- Available values are personal (default). "personal"
- :paramtype compute_instance_authorization_type: str or
- ~azure.mgmt.machinelearningservices.models.ComputeInstanceAuthorizationType
- :keyword personal_compute_instance_settings: Settings for a personal compute instance.
- :paramtype personal_compute_instance_settings:
- ~azure.mgmt.machinelearningservices.models.PersonalComputeInstanceSettings
- :keyword setup_scripts: Details of customized scripts to execute for setting up the cluster.
- :paramtype setup_scripts: ~azure.mgmt.machinelearningservices.models.SetupScripts
- :keyword schedules: The list of schedules to be applied on the computes.
- :paramtype schedules: ~azure.mgmt.machinelearningservices.models.ComputeSchedules
- :keyword enable_node_public_ip: Enable or disable node public IP address provisioning. Possible
- values are: Possible values are: true - Indicates that the compute nodes will have public IPs
- provisioned. false - Indicates that the compute nodes will have a private endpoint and no
- public IPs.
- :paramtype enable_node_public_ip: bool
+ .
+ :paramtype component_spec: JSON
+ :keyword stage: Stage in the component lifecycle.
+ :paramtype stage: str
"""
- super().__init__(**kwargs)
- self.vm_size = vm_size
- self.subnet = subnet
- self.application_sharing_policy = application_sharing_policy
- self.ssh_settings = ssh_settings
- self.custom_services = custom_services
- self.os_image_metadata = None
- self.connectivity_endpoints = None
- self.applications = None
- self.created_by = None
- self.errors = None
- self.state = None
- self.compute_instance_authorization_type = compute_instance_authorization_type
- self.personal_compute_instance_settings = personal_compute_instance_settings
- self.setup_scripts = setup_scripts
- self.last_operation = None
- self.schedules = schedules
- self.enable_node_public_ip = enable_node_public_ip
- self.containers = None
- self.data_disks = None
- self.data_mounts = None
- self.versions = None
-
+ super().__init__(
+ description=description,
+ properties=properties,
+ tags=tags,
+ auto_delete_setting=auto_delete_setting,
+ is_anonymous=is_anonymous,
+ is_archived=is_archived,
+ **kwargs
+ )
+ self.component_spec = component_spec
+ self.provisioning_state = None
+ self.stage = stage
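# A component version wraps the free-form componentSpec JSON; stage and auto_delete_setting are
# new surface in this API version. The spec contents below are illustrative placeholders, not a
# schema defined by this package.
from azure.mgmt.machinelearningservices import models

component_version = models.ComponentVersion(
    properties=models.ComponentVersionProperties(
        component_spec={
            "name": "train_step",
            "command": "python train.py",
            "environment": "azureml:my-env:1",
        },
        stage="Development",
    )
)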
-class ComputeInstanceSshSettings(_serialization.Model):
- """Specifies policy and settings for SSH access.
- Variables are only populated by the server, and will be ignored when sending a request.
+class ComponentVersionResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of ComponentVersion entities.
- :ivar ssh_public_access: State of the public SSH port. Possible values are: Disabled -
- Indicates that the public ssh port is closed on this instance. Enabled - Indicates that the
- public ssh port is open and accessible according to the VNet/subnet policy if applicable. Known
- values are: "Enabled" and "Disabled".
- :vartype ssh_public_access: str or ~azure.mgmt.machinelearningservices.models.SshPublicAccess
- :ivar admin_user_name: Describes the admin user name.
- :vartype admin_user_name: str
- :ivar ssh_port: Describes the port for connecting through SSH.
- :vartype ssh_port: int
- :ivar admin_public_key: Specifies the SSH rsa public key file as a string. Use "ssh-keygen -t
- rsa -b 2048" to generate your SSH key pairs.
- :vartype admin_public_key: str
+ :ivar next_link: The link to the next page of ComponentVersion objects. If null, there are no
+ additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type ComponentVersion.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.ComponentVersion]
"""
- _validation = {
- "admin_user_name": {"readonly": True},
- "ssh_port": {"readonly": True},
- }
-
_attribute_map = {
- "ssh_public_access": {"key": "sshPublicAccess", "type": "str"},
- "admin_user_name": {"key": "adminUserName", "type": "str"},
- "ssh_port": {"key": "sshPort", "type": "int"},
- "admin_public_key": {"key": "adminPublicKey", "type": "str"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[ComponentVersion]"},
}
def __init__(
self,
*,
- ssh_public_access: Union[str, "_models.SshPublicAccess"] = "Disabled",
- admin_public_key: Optional[str] = None,
+ next_link: Optional[str] = None,
+ value: Optional[List["_models.ComponentVersion"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword ssh_public_access: State of the public SSH port. Possible values are: Disabled -
- Indicates that the public ssh port is closed on this instance. Enabled - Indicates that the
- public ssh port is open and accessible according to the VNet/subnet policy if applicable. Known
- values are: "Enabled" and "Disabled".
- :paramtype ssh_public_access: str or ~azure.mgmt.machinelearningservices.models.SshPublicAccess
- :keyword admin_public_key: Specifies the SSH rsa public key file as a string. Use "ssh-keygen
- -t rsa -b 2048" to generate your SSH key pairs.
- :paramtype admin_public_key: str
+ :keyword next_link: The link to the next page of ComponentVersion objects. If null, there are
+ no additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type ComponentVersion.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.ComponentVersion]
"""
super().__init__(**kwargs)
- self.ssh_public_access = ssh_public_access
- self.admin_user_name = None
- self.ssh_port = None
- self.admin_public_key = admin_public_key
+ self.next_link = next_link
+ self.value = value
-class ComputeInstanceVersion(_serialization.Model):
- """Version of computeInstance.
+class ComputeInstanceSchema(_serialization.Model):
+ """Properties(top level) of ComputeInstance.
- :ivar runtime: Runtime of compute instance.
- :vartype runtime: str
+ :ivar properties: Properties of ComputeInstance.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.ComputeInstanceProperties
"""
_attribute_map = {
- "runtime": {"key": "runtime", "type": "str"},
+ "properties": {"key": "properties", "type": "ComputeInstanceProperties"},
}
- def __init__(self, *, runtime: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(self, *, properties: Optional["_models.ComputeInstanceProperties"] = None, **kwargs: Any) -> None:
"""
- :keyword runtime: Runtime of compute instance.
- :paramtype runtime: str
+ :keyword properties: Properties of ComputeInstance.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.ComputeInstanceProperties
"""
super().__init__(**kwargs)
- self.runtime = runtime
+ self.properties = properties
-class ComputeResourceSchema(_serialization.Model):
- """ComputeResourceSchema.
-
- :ivar properties: Compute properties.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.Compute
- """
-
- _attribute_map = {
- "properties": {"key": "properties", "type": "Compute"},
- }
-
- def __init__(self, *, properties: Optional["_models.Compute"] = None, **kwargs: Any) -> None:
- """
- :keyword properties: Compute properties.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.Compute
- """
- super().__init__(**kwargs)
- self.properties = properties
-
-
-class ComputeResource(Resource, ComputeResourceSchema):
- """Machine Learning compute object wrapped into ARM resource envelope.
+class ComputeInstance(Compute, ComputeInstanceSchema): # pylint: disable=too-many-instance-attributes
+ """An Azure Machine Learning compute instance.
Variables are only populated by the server, and will be ignored when sending a request.
- :ivar properties: Compute properties.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.Compute
- :ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
- :vartype id: str
- :ivar name: The name of the resource.
- :vartype name: str
- :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
- "Microsoft.Storage/storageAccounts".
- :vartype type: str
- :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
- information.
- :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
- :ivar identity: The identity of the resource.
- :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
- :ivar location: Specifies the location of the resource.
- :vartype location: str
- :ivar tags: Contains resource tags defined as key/value pairs.
- :vartype tags: dict[str, str]
- :ivar sku: The sku of the workspace.
- :vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar properties: Properties of ComputeInstance.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.ComputeInstanceProperties
+ :ivar compute_type: The type of compute. Required. Known values are: "AKS", "Kubernetes",
+ "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight", "Databricks",
+ "DataLakeAnalytics", and "SynapseSpark".
+ :vartype compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
+ :ivar compute_location: Location for the underlying compute.
+ :vartype compute_location: str
+ :ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
+ Updating, Provisioning, Succeeded, and Failed. Known values are: "Unknown", "Updating",
+ "Creating", "Deleting", "Succeeded", "Failed", and "Canceled".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.ProvisioningState
+ :ivar description: The description of the Machine Learning compute.
+ :vartype description: str
+ :ivar created_on: The time at which the compute was created.
+ :vartype created_on: ~datetime.datetime
+ :ivar modified_on: The time at which the compute was last modified.
+ :vartype modified_on: ~datetime.datetime
+ :ivar resource_id: ARM resource id of the underlying compute.
+ :vartype resource_id: str
+ :ivar provisioning_errors: Errors during provisioning.
+ :vartype provisioning_errors: list[~azure.mgmt.machinelearningservices.models.ErrorResponse]
+ :ivar is_attached_compute: Indicating whether the compute was provisioned by user and brought
+ from outside if true, or machine learning service provisioned it if false.
+ :vartype is_attached_compute: bool
+ :ivar disable_local_auth: Opt-out of local authentication and ensure customers can use only MSI
+ and AAD exclusively for authentication.
+ :vartype disable_local_auth: bool
"""
_validation = {
- "id": {"readonly": True},
- "name": {"readonly": True},
- "type": {"readonly": True},
- "system_data": {"readonly": True},
+ "compute_type": {"required": True},
+ "provisioning_state": {"readonly": True},
+ "created_on": {"readonly": True},
+ "modified_on": {"readonly": True},
+ "provisioning_errors": {"readonly": True},
+ "is_attached_compute": {"readonly": True},
}
_attribute_map = {
- "properties": {"key": "properties", "type": "Compute"},
- "id": {"key": "id", "type": "str"},
- "name": {"key": "name", "type": "str"},
- "type": {"key": "type", "type": "str"},
- "system_data": {"key": "systemData", "type": "SystemData"},
- "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
- "location": {"key": "location", "type": "str"},
- "tags": {"key": "tags", "type": "{str}"},
- "sku": {"key": "sku", "type": "Sku"},
+ "properties": {"key": "properties", "type": "ComputeInstanceProperties"},
+ "compute_type": {"key": "computeType", "type": "str"},
+ "compute_location": {"key": "computeLocation", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "description": {"key": "description", "type": "str"},
+ "created_on": {"key": "createdOn", "type": "iso-8601"},
+ "modified_on": {"key": "modifiedOn", "type": "iso-8601"},
+ "resource_id": {"key": "resourceId", "type": "str"},
+ "provisioning_errors": {"key": "provisioningErrors", "type": "[ErrorResponse]"},
+ "is_attached_compute": {"key": "isAttachedCompute", "type": "bool"},
+ "disable_local_auth": {"key": "disableLocalAuth", "type": "bool"},
}
def __init__(
self,
*,
- properties: Optional["_models.Compute"] = None,
- identity: Optional["_models.ManagedServiceIdentity"] = None,
- location: Optional[str] = None,
- tags: Optional[Dict[str, str]] = None,
- sku: Optional["_models.Sku"] = None,
+ properties: Optional["_models.ComputeInstanceProperties"] = None,
+ compute_location: Optional[str] = None,
+ description: Optional[str] = None,
+ resource_id: Optional[str] = None,
+ disable_local_auth: Optional[bool] = None,
**kwargs: Any
) -> None:
"""
- :keyword properties: Compute properties.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.Compute
- :keyword identity: The identity of the resource.
- :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
- :keyword location: Specifies the location of the resource.
- :paramtype location: str
- :keyword tags: Contains resource tags defined as key/value pairs.
- :paramtype tags: dict[str, str]
- :keyword sku: The sku of the workspace.
- :paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
+ :keyword properties: Properties of ComputeInstance.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.ComputeInstanceProperties
+ :keyword compute_location: Location for the underlying compute.
+ :paramtype compute_location: str
+ :keyword description: The description of the Machine Learning compute.
+ :paramtype description: str
+ :keyword resource_id: ARM resource id of the underlying compute.
+ :paramtype resource_id: str
+ :keyword disable_local_auth: Opt-out of local authentication and ensure customers can use only
+ MSI and AAD exclusively for authentication.
+ :paramtype disable_local_auth: bool
"""
- super().__init__(properties=properties, **kwargs)
+ super().__init__(
+ compute_location=compute_location,
+ description=description,
+ resource_id=resource_id,
+ disable_local_auth=disable_local_auth,
+ properties=properties,
+ **kwargs
+ )
self.properties = properties
- self.identity = identity
- self.location = location
- self.tags = tags
- self.sku = sku
- self.id = None
- self.name = None
- self.type = None
- self.system_data = None
+ self.compute_type: str = "ComputeInstance"
+ self.compute_location = compute_location
+ self.provisioning_state = None
+ self.description = description
+ self.created_on = None
+ self.modified_on = None
+ self.resource_id = resource_id
+ self.provisioning_errors = None
+ self.is_attached_compute = None
+ self.disable_local_auth = disable_local_auth
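# ComputeInstance now mixes ComputeInstanceSchema into Compute, so instance-level settings live
# in ComputeInstanceProperties while the envelope sent to ARM remains ComputeResource (defined
# elsewhere in this module). The VM size, location, and the begin_create_or_update call are
# placeholders/assumptions, not values taken from this diff.
from azure.mgmt.machinelearningservices import models

compute_resource = models.ComputeResource(
    location="westus2",
    properties=models.ComputeInstance(
        description="Notebook dev box",
        properties=models.ComputeInstanceProperties(vm_size="STANDARD_DS3_V2"),
    ),
)
# client.compute.begin_create_or_update("<resource-group>", "<workspace>", "<compute-name>", compute_resource)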
-class ComputeSchedules(_serialization.Model):
- """The list of schedules to be applied on the computes.
+class ComputeInstanceApplication(_serialization.Model):
+ """Defines an Aml Instance application and its connectivity endpoint URI.
- :ivar compute_start_stop: The list of compute start stop schedules to be applied.
- :vartype compute_start_stop:
- list[~azure.mgmt.machinelearningservices.models.ComputeStartStopSchedule]
+ :ivar display_name: Name of the ComputeInstance application.
+ :vartype display_name: str
+ :ivar endpoint_uri: Application's endpoint URI.
+ :vartype endpoint_uri: str
"""
_attribute_map = {
- "compute_start_stop": {"key": "computeStartStop", "type": "[ComputeStartStopSchedule]"},
+ "display_name": {"key": "displayName", "type": "str"},
+ "endpoint_uri": {"key": "endpointUri", "type": "str"},
}
def __init__(
- self, *, compute_start_stop: Optional[List["_models.ComputeStartStopSchedule"]] = None, **kwargs: Any
+ self, *, display_name: Optional[str] = None, endpoint_uri: Optional[str] = None, **kwargs: Any
) -> None:
"""
- :keyword compute_start_stop: The list of compute start stop schedules to be applied.
- :paramtype compute_start_stop:
- list[~azure.mgmt.machinelearningservices.models.ComputeStartStopSchedule]
+ :keyword display_name: Name of the ComputeInstance application.
+ :paramtype display_name: str
+ :keyword endpoint_uri: Application's endpoint URI.
+ :paramtype endpoint_uri: str
"""
super().__init__(**kwargs)
- self.compute_start_stop = compute_start_stop
-
+ self.display_name = display_name
+ self.endpoint_uri = endpoint_uri
-class ComputeStartStopSchedule(_serialization.Model):
- """Compute start stop schedule properties.
- Variables are only populated by the server, and will be ignored when sending a request.
+class ComputeInstanceAutologgerSettings(_serialization.Model):
+ """Specifies settings for autologger.
- :ivar id: A system assigned id for the schedule.
- :vartype id: str
- :ivar provisioning_status: The current deployment state of schedule. Known values are:
- "Completed", "Provisioning", and "Failed".
- :vartype provisioning_status: str or
- ~azure.mgmt.machinelearningservices.models.ProvisioningStatus
- :ivar status: Is the schedule enabled or disabled?. Known values are: "Enabled" and "Disabled".
- :vartype status: str or ~azure.mgmt.machinelearningservices.models.ScheduleStatus
- :ivar action: [Required] The compute power action. Known values are: "Start" and "Stop".
- :vartype action: str or ~azure.mgmt.machinelearningservices.models.ComputePowerAction
- :ivar trigger_type: [Required] The schedule trigger type. Known values are: "Recurrence" and
- "Cron".
- :vartype trigger_type: str or ~azure.mgmt.machinelearningservices.models.TriggerType
- :ivar recurrence: Required if triggerType is Recurrence.
- :vartype recurrence: ~azure.mgmt.machinelearningservices.models.Recurrence
- :ivar cron: Required if triggerType is Cron.
- :vartype cron: ~azure.mgmt.machinelearningservices.models.Cron
- :ivar schedule: [Deprecated] Not used any more.
- :vartype schedule: ~azure.mgmt.machinelearningservices.models.ScheduleBase
+ :ivar mlflow_autologger: Indicates whether mlflow autologger is enabled for notebooks. Known
+ values are: "Enabled" and "Disabled".
+ :vartype mlflow_autologger: str or ~azure.mgmt.machinelearningservices.models.MlflowAutologger
"""
- _validation = {
- "id": {"readonly": True},
- "provisioning_status": {"readonly": True},
- }
-
_attribute_map = {
- "id": {"key": "id", "type": "str"},
- "provisioning_status": {"key": "provisioningStatus", "type": "str"},
- "status": {"key": "status", "type": "str"},
- "action": {"key": "action", "type": "str"},
- "trigger_type": {"key": "triggerType", "type": "str"},
- "recurrence": {"key": "recurrence", "type": "Recurrence"},
- "cron": {"key": "cron", "type": "Cron"},
- "schedule": {"key": "schedule", "type": "ScheduleBase"},
+ "mlflow_autologger": {"key": "mlflowAutologger", "type": "str"},
}
def __init__(
- self,
- *,
- status: Optional[Union[str, "_models.ScheduleStatus"]] = None,
- action: Optional[Union[str, "_models.ComputePowerAction"]] = None,
- trigger_type: Optional[Union[str, "_models.TriggerType"]] = None,
- recurrence: Optional["_models.Recurrence"] = None,
- cron: Optional["_models.Cron"] = None,
- schedule: Optional["_models.ScheduleBase"] = None,
- **kwargs: Any
+ self, *, mlflow_autologger: Optional[Union[str, "_models.MlflowAutologger"]] = None, **kwargs: Any
) -> None:
"""
- :keyword status: Is the schedule enabled or disabled?. Known values are: "Enabled" and
- "Disabled".
- :paramtype status: str or ~azure.mgmt.machinelearningservices.models.ScheduleStatus
- :keyword action: [Required] The compute power action. Known values are: "Start" and "Stop".
- :paramtype action: str or ~azure.mgmt.machinelearningservices.models.ComputePowerAction
- :keyword trigger_type: [Required] The schedule trigger type. Known values are: "Recurrence" and
- "Cron".
- :paramtype trigger_type: str or ~azure.mgmt.machinelearningservices.models.TriggerType
- :keyword recurrence: Required if triggerType is Recurrence.
- :paramtype recurrence: ~azure.mgmt.machinelearningservices.models.Recurrence
- :keyword cron: Required if triggerType is Cron.
- :paramtype cron: ~azure.mgmt.machinelearningservices.models.Cron
- :keyword schedule: [Deprecated] Not used any more.
- :paramtype schedule: ~azure.mgmt.machinelearningservices.models.ScheduleBase
+ :keyword mlflow_autologger: Indicates whether mlflow autologger is enabled for notebooks. Known
+ values are: "Enabled" and "Disabled".
+ :paramtype mlflow_autologger: str or
+ ~azure.mgmt.machinelearningservices.models.MlflowAutologger
"""
super().__init__(**kwargs)
- self.id = None
- self.provisioning_status = None
- self.status = status
- self.action = action
- self.trigger_type = trigger_type
- self.recurrence = recurrence
- self.cron = cron
- self.schedule = schedule
+ self.mlflow_autologger = mlflow_autologger
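
A minimal usage sketch for the new ComputeInstanceAutologgerSettings model (not part of the generated diff; it assumes the class is importable from azure.mgmt.machinelearningservices.models as the docstrings above indicate, and uses the documented "Enabled" value):

from azure.mgmt.machinelearningservices import models

# Turn on the MLflow autologger for notebooks; known values per the docstring are
# "Enabled" and "Disabled".
autologger_settings = models.ComputeInstanceAutologgerSettings(mlflow_autologger="Enabled")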
-class ContainerResourceRequirements(_serialization.Model):
- """Resource requirements for each container instance within an online deployment.
+class ComputeInstanceConnectivityEndpoints(_serialization.Model):
+ """Defines all connectivity endpoints and properties for an ComputeInstance.
- :ivar container_resource_limits: Container resource limit info:.
- :vartype container_resource_limits:
- ~azure.mgmt.machinelearningservices.models.ContainerResourceSettings
- :ivar container_resource_requests: Container resource request info:.
- :vartype container_resource_requests:
- ~azure.mgmt.machinelearningservices.models.ContainerResourceSettings
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar public_ip_address: Public IP Address of this ComputeInstance.
+ :vartype public_ip_address: str
+ :ivar private_ip_address: Private IP Address of this ComputeInstance (local to the VNET in
+ which the compute instance is deployed).
+ :vartype private_ip_address: str
"""
+ _validation = {
+ "public_ip_address": {"readonly": True},
+ "private_ip_address": {"readonly": True},
+ }
+
_attribute_map = {
- "container_resource_limits": {"key": "containerResourceLimits", "type": "ContainerResourceSettings"},
- "container_resource_requests": {"key": "containerResourceRequests", "type": "ContainerResourceSettings"},
+ "public_ip_address": {"key": "publicIpAddress", "type": "str"},
+ "private_ip_address": {"key": "privateIpAddress", "type": "str"},
}
- def __init__(
- self,
- *,
- container_resource_limits: Optional["_models.ContainerResourceSettings"] = None,
- container_resource_requests: Optional["_models.ContainerResourceSettings"] = None,
- **kwargs: Any
- ) -> None:
- """
- :keyword container_resource_limits: Container resource limit info:.
- :paramtype container_resource_limits:
- ~azure.mgmt.machinelearningservices.models.ContainerResourceSettings
- :keyword container_resource_requests: Container resource request info:.
- :paramtype container_resource_requests:
- ~azure.mgmt.machinelearningservices.models.ContainerResourceSettings
- """
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
super().__init__(**kwargs)
- self.container_resource_limits = container_resource_limits
- self.container_resource_requests = container_resource_requests
+ self.public_ip_address = None
+ self.private_ip_address = None
-class ContainerResourceSettings(_serialization.Model):
- """ContainerResourceSettings.
+class ComputeInstanceContainer(_serialization.Model):
+ """Defines an Aml Instance container.
- :ivar cpu: Number of vCPUs request/limit for container. More info:
- https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/.
- :vartype cpu: str
- :ivar gpu: Number of Nvidia GPU cards request/limit for container. More info:
- https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/.
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar name: Name of the ComputeInstance container.
+ :vartype name: str
+ :ivar autosave: Auto save settings. Known values are: "None", "Local", and "Remote".
+ :vartype autosave: str or ~azure.mgmt.machinelearningservices.models.Autosave
+ :ivar gpu: Information of GPU.
:vartype gpu: str
- :ivar memory: Memory size request/limit for container. More info:
- https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/.
- :vartype memory: str
+ :ivar network: Network of this container. Known values are: "Bridge" and "Host".
+ :vartype network: str or ~azure.mgmt.machinelearningservices.models.Network
+ :ivar environment: Environment information of this container.
+ :vartype environment: ~azure.mgmt.machinelearningservices.models.ComputeInstanceEnvironmentInfo
+ :ivar services: Services of this container.
+ :vartype services: list[JSON]
"""
+ _validation = {
+ "services": {"readonly": True},
+ }
+
_attribute_map = {
- "cpu": {"key": "cpu", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "autosave": {"key": "autosave", "type": "str"},
"gpu": {"key": "gpu", "type": "str"},
- "memory": {"key": "memory", "type": "str"},
+ "network": {"key": "network", "type": "str"},
+ "environment": {"key": "environment", "type": "ComputeInstanceEnvironmentInfo"},
+ "services": {"key": "services", "type": "[object]"},
}
def __init__(
- self, *, cpu: Optional[str] = None, gpu: Optional[str] = None, memory: Optional[str] = None, **kwargs: Any
+ self,
+ *,
+ name: Optional[str] = None,
+ autosave: Optional[Union[str, "_models.Autosave"]] = None,
+ gpu: Optional[str] = None,
+ network: Optional[Union[str, "_models.Network"]] = None,
+ environment: Optional["_models.ComputeInstanceEnvironmentInfo"] = None,
+ **kwargs: Any
) -> None:
"""
- :keyword cpu: Number of vCPUs request/limit for container. More info:
- https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/.
- :paramtype cpu: str
- :keyword gpu: Number of Nvidia GPU cards request/limit for container. More info:
- https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/.
+ :keyword name: Name of the ComputeInstance container.
+ :paramtype name: str
+ :keyword autosave: Auto save settings. Known values are: "None", "Local", and "Remote".
+ :paramtype autosave: str or ~azure.mgmt.machinelearningservices.models.Autosave
+ :keyword gpu: Information of GPU.
:paramtype gpu: str
- :keyword memory: Memory size request/limit for container. More info:
- https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/.
- :paramtype memory: str
+ :keyword network: Network of this container. Known values are: "Bridge" and "Host".
+ :paramtype network: str or ~azure.mgmt.machinelearningservices.models.Network
+ :keyword environment: Environment information of this container.
+ :paramtype environment:
+ ~azure.mgmt.machinelearningservices.models.ComputeInstanceEnvironmentInfo
"""
super().__init__(**kwargs)
- self.cpu = cpu
+ self.name = name
+ self.autosave = autosave
self.gpu = gpu
- self.memory = memory
+ self.network = network
+ self.environment = environment
+ self.services = None
-class CosmosDbSettings(_serialization.Model):
- """CosmosDbSettings.
+class ComputeInstanceCreatedBy(_serialization.Model):
+ """Describes information on user who created this ComputeInstance.
- :ivar collections_throughput: The throughput of the collections in cosmosdb database.
- :vartype collections_throughput: int
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar user_name: Name of the user.
+ :vartype user_name: str
+ :ivar user_org_id: Uniquely identifies the user's Azure Active Directory organization.
+ :vartype user_org_id: str
+ :ivar user_id: Uniquely identifies the user within his/her organization.
+ :vartype user_id: str
"""
+ _validation = {
+ "user_name": {"readonly": True},
+ "user_org_id": {"readonly": True},
+ "user_id": {"readonly": True},
+ }
+
_attribute_map = {
- "collections_throughput": {"key": "collectionsThroughput", "type": "int"},
+ "user_name": {"key": "userName", "type": "str"},
+ "user_org_id": {"key": "userOrgId", "type": "str"},
+ "user_id": {"key": "userId", "type": "str"},
}
- def __init__(self, *, collections_throughput: Optional[int] = None, **kwargs: Any) -> None:
- """
- :keyword collections_throughput: The throughput of the collections in cosmosdb database.
- :paramtype collections_throughput: int
- """
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
super().__init__(**kwargs)
- self.collections_throughput = collections_throughput
+ self.user_name = None
+ self.user_org_id = None
+ self.user_id = None
-class Cron(_serialization.Model):
- """The workflow trigger cron for ComputeStartStop schedule type.
+class ComputeInstanceDataDisk(_serialization.Model):
+ """Defines an Aml Instance DataDisk.
- :ivar start_time: The start time in yyyy-MM-ddTHH:mm:ss format.
- :vartype start_time: str
- :ivar time_zone: Specifies time zone in which the schedule runs.
- TimeZone should follow Windows time zone format. Refer:
- https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
- :vartype time_zone: str
- :ivar expression: [Required] Specifies cron expression of schedule.
- The expression should follow NCronTab format.
- :vartype expression: str
+ :ivar caching: Caching type of Data Disk. Known values are: "None", "ReadOnly", and
+ "ReadWrite".
+ :vartype caching: str or ~azure.mgmt.machinelearningservices.models.Caching
+ :ivar disk_size_gb: The initial disk size in gigabytes.
+ :vartype disk_size_gb: int
+ :ivar lun: The lun is used to uniquely identify each data disk. If attaching multiple disks,
+ each should have a distinct lun.
+ :vartype lun: int
+ :ivar storage_account_type: Type of this storage account. Known values are: "Standard_LRS" and
+ "Premium_LRS".
+ :vartype storage_account_type: str or
+ ~azure.mgmt.machinelearningservices.models.StorageAccountType
"""
_attribute_map = {
- "start_time": {"key": "startTime", "type": "str"},
- "time_zone": {"key": "timeZone", "type": "str"},
- "expression": {"key": "expression", "type": "str"},
+ "caching": {"key": "caching", "type": "str"},
+ "disk_size_gb": {"key": "diskSizeGB", "type": "int"},
+ "lun": {"key": "lun", "type": "int"},
+ "storage_account_type": {"key": "storageAccountType", "type": "str"},
}
def __init__(
self,
*,
- start_time: Optional[str] = None,
- time_zone: str = "UTC",
- expression: Optional[str] = None,
+ caching: Optional[Union[str, "_models.Caching"]] = None,
+ disk_size_gb: Optional[int] = None,
+ lun: Optional[int] = None,
+ storage_account_type: Union[str, "_models.StorageAccountType"] = "Standard_LRS",
**kwargs: Any
) -> None:
"""
- :keyword start_time: The start time in yyyy-MM-ddTHH:mm:ss format.
- :paramtype start_time: str
- :keyword time_zone: Specifies time zone in which the schedule runs.
- TimeZone should follow Windows time zone format. Refer:
- https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
- :paramtype time_zone: str
- :keyword expression: [Required] Specifies cron expression of schedule.
- The expression should follow NCronTab format.
- :paramtype expression: str
+ :keyword caching: Caching type of Data Disk. Known values are: "None", "ReadOnly", and
+ "ReadWrite".
+ :paramtype caching: str or ~azure.mgmt.machinelearningservices.models.Caching
+ :keyword disk_size_gb: The initial disk size in gigabytes.
+ :paramtype disk_size_gb: int
+ :keyword lun: The lun is used to uniquely identify each data disk. If attaching multiple disks,
+ each should have a distinct lun.
+ :paramtype lun: int
+ :keyword storage_account_type: Type of this storage account. Known values are: "Standard_LRS"
+ and "Premium_LRS".
+ :paramtype storage_account_type: str or
+ ~azure.mgmt.machinelearningservices.models.StorageAccountType
"""
super().__init__(**kwargs)
- self.start_time = start_time
- self.time_zone = time_zone
- self.expression = expression
-
-
-class TriggerBase(_serialization.Model):
- """TriggerBase.
+ self.caching = caching
+ self.disk_size_gb = disk_size_gb
+ self.lun = lun
+ self.storage_account_type = storage_account_type
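
For reference, constructing the ComputeInstanceDataDisk model looks like this (an illustrative sketch, not part of the generated diff; on a live compute instance this data is normally reported back by the service through the read-only data_disks collection, and the literal values are examples only):

from azure.mgmt.machinelearningservices import models

data_disk = models.ComputeInstanceDataDisk(
    caching="ReadWrite",                 # known values: "None", "ReadOnly", "ReadWrite"
    disk_size_gb=128,                    # initial disk size in gigabytes
    lun=0,                               # must be distinct per attached disk
    storage_account_type="Premium_LRS",  # defaults to "Standard_LRS"
)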
- You probably want to use the sub-classes and not this class directly. Known sub-classes are:
- CronTrigger, RecurrenceTrigger
- All required parameters must be populated in order to send to Azure.
+class ComputeInstanceDataMount(_serialization.Model):
+ """Defines an Aml Instance DataMount.
- :ivar end_time: Specifies end time of schedule in ISO 8601, but without a UTC offset. Refer
- https://en.wikipedia.org/wiki/ISO_8601.
- Recommented format would be "2022-06-01T00:00:01"
- If not present, the schedule will run indefinitely.
- :vartype end_time: str
- :ivar start_time: Specifies start time of schedule in ISO 8601 format, but without a UTC
- offset.
- :vartype start_time: str
- :ivar time_zone: Specifies time zone in which the schedule runs.
- TimeZone should follow Windows time zone format. Refer:
- https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
- :vartype time_zone: str
- :ivar trigger_type: [Required]. Required. Known values are: "Recurrence" and "Cron".
- :vartype trigger_type: str or ~azure.mgmt.machinelearningservices.models.TriggerType
+ :ivar source: Source of the ComputeInstance data mount.
+ :vartype source: str
+ :ivar source_type: Data source type. Known values are: "Dataset", "Datastore", and "URI".
+ :vartype source_type: str or ~azure.mgmt.machinelearningservices.models.SourceType
+ :ivar mount_name: Name of the ComputeInstance data mount.
+ :vartype mount_name: str
+ :ivar mount_action: Mount Action. Known values are: "Mount" and "Unmount".
+ :vartype mount_action: str or ~azure.mgmt.machinelearningservices.models.MountAction
+ :ivar created_by: The user who created this data mount.
+ :vartype created_by: str
+ :ivar mount_path: Path of this data mount.
+ :vartype mount_path: str
+ :ivar mount_state: Mount state. Known values are: "MountRequested", "Mounted", "MountFailed",
+ "UnmountRequested", "UnmountFailed", and "Unmounted".
+ :vartype mount_state: str or ~azure.mgmt.machinelearningservices.models.MountState
+ :ivar mounted_on: The time when the disk was mounted.
+ :vartype mounted_on: ~datetime.datetime
+ :ivar error: Error of this data mount.
+ :vartype error: str
"""
- _validation = {
- "trigger_type": {"required": True},
- }
-
- _attribute_map = {
- "end_time": {"key": "endTime", "type": "str"},
- "start_time": {"key": "startTime", "type": "str"},
- "time_zone": {"key": "timeZone", "type": "str"},
- "trigger_type": {"key": "triggerType", "type": "str"},
- }
-
- _subtype_map = {"trigger_type": {"Cron": "CronTrigger", "Recurrence": "RecurrenceTrigger"}}
-
- def __init__(
- self, *, end_time: Optional[str] = None, start_time: Optional[str] = None, time_zone: str = "UTC", **kwargs: Any
- ) -> None:
- """
- :keyword end_time: Specifies end time of schedule in ISO 8601, but without a UTC offset. Refer
- https://en.wikipedia.org/wiki/ISO_8601.
- Recommented format would be "2022-06-01T00:00:01"
- If not present, the schedule will run indefinitely.
- :paramtype end_time: str
- :keyword start_time: Specifies start time of schedule in ISO 8601 format, but without a UTC
- offset.
- :paramtype start_time: str
- :keyword time_zone: Specifies time zone in which the schedule runs.
- TimeZone should follow Windows time zone format. Refer:
- https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
- :paramtype time_zone: str
- """
- super().__init__(**kwargs)
- self.end_time = end_time
- self.start_time = start_time
- self.time_zone = time_zone
- self.trigger_type: Optional[str] = None
-
-
-class CronTrigger(TriggerBase):
- """CronTrigger.
-
- All required parameters must be populated in order to send to Azure.
-
- :ivar end_time: Specifies end time of schedule in ISO 8601, but without a UTC offset. Refer
- https://en.wikipedia.org/wiki/ISO_8601.
- Recommented format would be "2022-06-01T00:00:01"
- If not present, the schedule will run indefinitely.
- :vartype end_time: str
- :ivar start_time: Specifies start time of schedule in ISO 8601 format, but without a UTC
- offset.
- :vartype start_time: str
- :ivar time_zone: Specifies time zone in which the schedule runs.
- TimeZone should follow Windows time zone format. Refer:
- https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
- :vartype time_zone: str
- :ivar trigger_type: [Required]. Required. Known values are: "Recurrence" and "Cron".
- :vartype trigger_type: str or ~azure.mgmt.machinelearningservices.models.TriggerType
- :ivar expression: [Required] Specifies cron expression of schedule.
- The expression should follow NCronTab format. Required.
- :vartype expression: str
- """
-
- _validation = {
- "trigger_type": {"required": True},
- "expression": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
- }
-
_attribute_map = {
- "end_time": {"key": "endTime", "type": "str"},
- "start_time": {"key": "startTime", "type": "str"},
- "time_zone": {"key": "timeZone", "type": "str"},
- "trigger_type": {"key": "triggerType", "type": "str"},
- "expression": {"key": "expression", "type": "str"},
+ "source": {"key": "source", "type": "str"},
+ "source_type": {"key": "sourceType", "type": "str"},
+ "mount_name": {"key": "mountName", "type": "str"},
+ "mount_action": {"key": "mountAction", "type": "str"},
+ "created_by": {"key": "createdBy", "type": "str"},
+ "mount_path": {"key": "mountPath", "type": "str"},
+ "mount_state": {"key": "mountState", "type": "str"},
+ "mounted_on": {"key": "mountedOn", "type": "iso-8601"},
+ "error": {"key": "error", "type": "str"},
}
def __init__(
self,
*,
- expression: str,
- end_time: Optional[str] = None,
- start_time: Optional[str] = None,
- time_zone: str = "UTC",
+ source: Optional[str] = None,
+ source_type: Optional[Union[str, "_models.SourceType"]] = None,
+ mount_name: Optional[str] = None,
+ mount_action: Optional[Union[str, "_models.MountAction"]] = None,
+ created_by: Optional[str] = None,
+ mount_path: Optional[str] = None,
+ mount_state: Optional[Union[str, "_models.MountState"]] = None,
+ mounted_on: Optional[datetime.datetime] = None,
+ error: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword end_time: Specifies end time of schedule in ISO 8601, but without a UTC offset. Refer
- https://en.wikipedia.org/wiki/ISO_8601.
- Recommented format would be "2022-06-01T00:00:01"
- If not present, the schedule will run indefinitely.
- :paramtype end_time: str
- :keyword start_time: Specifies start time of schedule in ISO 8601 format, but without a UTC
- offset.
- :paramtype start_time: str
- :keyword time_zone: Specifies time zone in which the schedule runs.
- TimeZone should follow Windows time zone format. Refer:
- https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
- :paramtype time_zone: str
- :keyword expression: [Required] Specifies cron expression of schedule.
- The expression should follow NCronTab format. Required.
- :paramtype expression: str
+ :keyword source: Source of the ComputeInstance data mount.
+ :paramtype source: str
+ :keyword source_type: Data source type. Known values are: "Dataset", "Datastore", and "URI".
+ :paramtype source_type: str or ~azure.mgmt.machinelearningservices.models.SourceType
+ :keyword mount_name: Name of the ComputeInstance data mount.
+ :paramtype mount_name: str
+ :keyword mount_action: Mount Action. Known values are: "Mount" and "Unmount".
+ :paramtype mount_action: str or ~azure.mgmt.machinelearningservices.models.MountAction
+ :keyword created_by: The user who created this data mount.
+ :paramtype created_by: str
+ :keyword mount_path: Path of this data mount.
+ :paramtype mount_path: str
+ :keyword mount_state: Mount state. Known values are: "MountRequested", "Mounted",
+ "MountFailed", "UnmountRequested", "UnmountFailed", and "Unmounted".
+ :paramtype mount_state: str or ~azure.mgmt.machinelearningservices.models.MountState
+ :keyword mounted_on: The time when the disk was mounted.
+ :paramtype mounted_on: ~datetime.datetime
+ :keyword error: Error of this data mount.
+ :paramtype error: str
"""
- super().__init__(end_time=end_time, start_time=start_time, time_zone=time_zone, **kwargs)
- self.trigger_type: str = "Cron"
- self.expression = expression
-
+ super().__init__(**kwargs)
+ self.source = source
+ self.source_type = source_type
+ self.mount_name = mount_name
+ self.mount_action = mount_action
+ self.created_by = created_by
+ self.mount_path = mount_path
+ self.mount_state = mount_state
+ self.mounted_on = mounted_on
+ self.error = error
-class CustomForecastHorizon(ForecastHorizon):
- """The desired maximum forecast horizon in units of time-series frequency.
- All required parameters must be populated in order to send to Azure.
+class ComputeInstanceEnvironmentInfo(_serialization.Model):
+ """Environment information.
- :ivar mode: [Required] Set forecast horizon value selection mode. Required. Known values are:
- "Auto" and "Custom".
- :vartype mode: str or ~azure.mgmt.machinelearningservices.models.ForecastHorizonMode
- :ivar value: [Required] Forecast horizon value. Required.
- :vartype value: int
+ :ivar name: Name of the environment.
+ :vartype name: str
+ :ivar version: Version of the environment.
+ :vartype version: str
"""
- _validation = {
- "mode": {"required": True},
- "value": {"required": True},
- }
-
_attribute_map = {
- "mode": {"key": "mode", "type": "str"},
- "value": {"key": "value", "type": "int"},
+ "name": {"key": "name", "type": "str"},
+ "version": {"key": "version", "type": "str"},
}
- def __init__(self, *, value: int, **kwargs: Any) -> None:
+ def __init__(self, *, name: Optional[str] = None, version: Optional[str] = None, **kwargs: Any) -> None:
"""
- :keyword value: [Required] Forecast horizon value. Required.
- :paramtype value: int
+ :keyword name: Name of the environment.
+ :paramtype name: str
+ :keyword version: Version of the environment.
+ :paramtype version: str
"""
super().__init__(**kwargs)
- self.mode: str = "Custom"
- self.value = value
-
-
-class JobInput(_serialization.Model):
- """Command job definition.
+ self.name = name
+ self.version = version
- You probably want to use the sub-classes and not this class directly. Known sub-classes are:
- CustomModelJobInput, LiteralJobInput, MLFlowModelJobInput, MLTableJobInput,
- TritonModelJobInput, UriFileJobInput, UriFolderJobInput
- All required parameters must be populated in order to send to Azure.
+class ComputeInstanceLastOperation(_serialization.Model):
+ """The last operation on ComputeInstance.
- :ivar description: Description for the input.
- :vartype description: str
- :ivar job_input_type: [Required] Specifies the type of job. Required. Known values are:
- "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
- "triton_model".
- :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
+ :ivar operation_name: Name of the last operation. Known values are: "Create", "Start", "Stop",
+ "Restart", "Resize", "Reimage", and "Delete".
+ :vartype operation_name: str or ~azure.mgmt.machinelearningservices.models.OperationName
+ :ivar operation_time: Time of the last operation.
+ :vartype operation_time: ~datetime.datetime
+ :ivar operation_status: Operation status. Known values are: "InProgress", "Succeeded",
+ "CreateFailed", "StartFailed", "StopFailed", "RestartFailed", "ResizeFailed", "ReimageFailed",
+ and "DeleteFailed".
+ :vartype operation_status: str or ~azure.mgmt.machinelearningservices.models.OperationStatus
+ :ivar operation_trigger: Trigger of operation. Known values are: "User", "Schedule", and
+ "IdleShutdown".
+ :vartype operation_trigger: str or ~azure.mgmt.machinelearningservices.models.OperationTrigger
"""
- _validation = {
- "job_input_type": {"required": True},
- }
-
_attribute_map = {
- "description": {"key": "description", "type": "str"},
- "job_input_type": {"key": "jobInputType", "type": "str"},
- }
-
- _subtype_map = {
- "job_input_type": {
- "custom_model": "CustomModelJobInput",
- "literal": "LiteralJobInput",
- "mlflow_model": "MLFlowModelJobInput",
- "mltable": "MLTableJobInput",
- "triton_model": "TritonModelJobInput",
- "uri_file": "UriFileJobInput",
- "uri_folder": "UriFolderJobInput",
- }
+ "operation_name": {"key": "operationName", "type": "str"},
+ "operation_time": {"key": "operationTime", "type": "iso-8601"},
+ "operation_status": {"key": "operationStatus", "type": "str"},
+ "operation_trigger": {"key": "operationTrigger", "type": "str"},
}
- def __init__(self, *, description: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ operation_name: Optional[Union[str, "_models.OperationName"]] = None,
+ operation_time: Optional[datetime.datetime] = None,
+ operation_status: Optional[Union[str, "_models.OperationStatus"]] = None,
+ operation_trigger: Optional[Union[str, "_models.OperationTrigger"]] = None,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword description: Description for the input.
- :paramtype description: str
+ :keyword operation_name: Name of the last operation. Known values are: "Create", "Start",
+ "Stop", "Restart", "Resize", "Reimage", and "Delete".
+ :paramtype operation_name: str or ~azure.mgmt.machinelearningservices.models.OperationName
+ :keyword operation_time: Time of the last operation.
+ :paramtype operation_time: ~datetime.datetime
+ :keyword operation_status: Operation status. Known values are: "InProgress", "Succeeded",
+ "CreateFailed", "StartFailed", "StopFailed", "RestartFailed", "ResizeFailed", "ReimageFailed",
+ and "DeleteFailed".
+ :paramtype operation_status: str or ~azure.mgmt.machinelearningservices.models.OperationStatus
+ :keyword operation_trigger: Trigger of operation. Known values are: "User", "Schedule", and
+ "IdleShutdown".
+ :paramtype operation_trigger: str or
+ ~azure.mgmt.machinelearningservices.models.OperationTrigger
"""
super().__init__(**kwargs)
- self.description = description
- self.job_input_type: Optional[str] = None
+ self.operation_name = operation_name
+ self.operation_time = operation_time
+ self.operation_status = operation_status
+ self.operation_trigger = operation_trigger
-class CustomModelJobInput(AssetJobInput, JobInput):
- """CustomModelJobInput.
+class ComputeInstanceProperties(_serialization.Model): # pylint: disable=too-many-instance-attributes
+ """Compute Instance properties.
- All required parameters must be populated in order to send to Azure.
+ Variables are only populated by the server, and will be ignored when sending a request.
- :ivar description: Description for the input.
- :vartype description: str
- :ivar job_input_type: [Required] Specifies the type of job. Required. Known values are:
- "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
- "triton_model".
- :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
- :ivar mode: Input Asset Delivery Mode. Known values are: "ReadOnlyMount", "ReadWriteMount",
- "Download", "Direct", "EvalMount", and "EvalDownload".
- :vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
- :ivar uri: [Required] Input Asset URI. Required.
- :vartype uri: str
+ :ivar vm_size: Virtual Machine Size.
+ :vartype vm_size: str
+ :ivar subnet: Virtual network subnet resource ID the compute nodes belong to.
+ :vartype subnet: ~azure.mgmt.machinelearningservices.models.ResourceId
+ :ivar application_sharing_policy: Policy for sharing applications on this compute instance
+ among users of parent workspace. If Personal, only the creator can access applications on this
+ compute instance. When Shared, any workspace user can access applications on this instance
+ depending on his/her assigned role. Known values are: "Personal" and "Shared".
+ :vartype application_sharing_policy: str or
+ ~azure.mgmt.machinelearningservices.models.ApplicationSharingPolicy
+ :ivar autologger_settings: Specifies settings for autologger.
+ :vartype autologger_settings:
+ ~azure.mgmt.machinelearningservices.models.ComputeInstanceAutologgerSettings
+ :ivar ssh_settings: Specifies policy and settings for SSH access.
+ :vartype ssh_settings: ~azure.mgmt.machinelearningservices.models.ComputeInstanceSshSettings
+ :ivar custom_services: List of Custom Services added to the compute.
+ :vartype custom_services: list[~azure.mgmt.machinelearningservices.models.CustomService]
+ :ivar os_image_metadata: Returns metadata about the operating system image for this compute
+ instance.
+ :vartype os_image_metadata: ~azure.mgmt.machinelearningservices.models.ImageMetadata
+ :ivar connectivity_endpoints: Describes all connectivity endpoints available for this
+ ComputeInstance.
+ :vartype connectivity_endpoints:
+ ~azure.mgmt.machinelearningservices.models.ComputeInstanceConnectivityEndpoints
+ :ivar applications: Describes available applications and their endpoints on this
+ ComputeInstance.
+ :vartype applications:
+ list[~azure.mgmt.machinelearningservices.models.ComputeInstanceApplication]
+ :ivar created_by: Describes information on user who created this ComputeInstance.
+ :vartype created_by: ~azure.mgmt.machinelearningservices.models.ComputeInstanceCreatedBy
+ :ivar errors: Collection of errors encountered on this ComputeInstance.
+ :vartype errors: list[~azure.mgmt.machinelearningservices.models.ErrorResponse]
+ :ivar state: The current state of this ComputeInstance. Known values are: "Creating",
+ "CreateFailed", "Deleting", "Running", "Restarting", "Resizing", "JobRunning", "SettingUp",
+ "SetupFailed", "Starting", "Stopped", "Stopping", "UserSettingUp", "UserSetupFailed",
+ "Unknown", and "Unusable".
+ :vartype state: str or ~azure.mgmt.machinelearningservices.models.ComputeInstanceState
+ :ivar compute_instance_authorization_type: The Compute Instance Authorization type. Available
+ values are personal (default). "personal"
+ :vartype compute_instance_authorization_type: str or
+ ~azure.mgmt.machinelearningservices.models.ComputeInstanceAuthorizationType
+ :ivar enable_os_patching: Enable Auto OS Patching. Possible values are: true, false.
+ :vartype enable_os_patching: bool
+ :ivar enable_root_access: Enable root access. Possible values are: true, false.
+ :vartype enable_root_access: bool
+ :ivar enable_sso: Enable SSO (single sign on). Possible values are: true, false.
+ :vartype enable_sso: bool
+ :ivar release_quota_on_stop: Release quota if compute instance stopped. Possible values are:
+ true - release quota if compute instance stopped. false - don't release quota when compute
+ instance stopped.
+ :vartype release_quota_on_stop: bool
+ :ivar personal_compute_instance_settings: Settings for a personal compute instance.
+ :vartype personal_compute_instance_settings:
+ ~azure.mgmt.machinelearningservices.models.PersonalComputeInstanceSettings
+ :ivar setup_scripts: Details of customized scripts to execute for setting up the cluster.
+ :vartype setup_scripts: ~azure.mgmt.machinelearningservices.models.SetupScripts
+ :ivar last_operation: The last operation on ComputeInstance.
+ :vartype last_operation:
+ ~azure.mgmt.machinelearningservices.models.ComputeInstanceLastOperation
+ :ivar schedules: The list of schedules to be applied on the computes.
+ :vartype schedules: ~azure.mgmt.machinelearningservices.models.ComputeSchedules
+ :ivar idle_time_before_shutdown: Stops the compute instance after a user-defined period of
+ inactivity. Time is defined in ISO8601 format. Minimum is 15 min, maximum is 3 days.
+ :vartype idle_time_before_shutdown: str
+ :ivar enable_node_public_ip: Enable or disable node public IP address provisioning. Possible
+ values are: true - Indicates that the compute nodes will have public IPs provisioned. false -
+ Indicates that the compute nodes will have a private endpoint and no public IPs.
+ :vartype enable_node_public_ip: bool
+ :ivar containers: Describes information about the containers on this ComputeInstance.
+ :vartype containers: list[~azure.mgmt.machinelearningservices.models.ComputeInstanceContainer]
+ :ivar data_disks: Describes information about the dataDisks on this ComputeInstance.
+ :vartype data_disks: list[~azure.mgmt.machinelearningservices.models.ComputeInstanceDataDisk]
+ :ivar data_mounts: Describes information about the dataMounts on this ComputeInstance.
+ :vartype data_mounts: list[~azure.mgmt.machinelearningservices.models.ComputeInstanceDataMount]
+ :ivar versions: ComputeInstance version.
+ :vartype versions: ~azure.mgmt.machinelearningservices.models.ComputeInstanceVersion
"""
_validation = {
- "job_input_type": {"required": True},
- "uri": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "os_image_metadata": {"readonly": True},
+ "connectivity_endpoints": {"readonly": True},
+ "applications": {"readonly": True},
+ "created_by": {"readonly": True},
+ "errors": {"readonly": True},
+ "state": {"readonly": True},
+ "last_operation": {"readonly": True},
+ "containers": {"readonly": True},
+ "data_disks": {"readonly": True},
+ "data_mounts": {"readonly": True},
+ "versions": {"readonly": True},
}
_attribute_map = {
- "description": {"key": "description", "type": "str"},
- "job_input_type": {"key": "jobInputType", "type": "str"},
- "mode": {"key": "mode", "type": "str"},
- "uri": {"key": "uri", "type": "str"},
+ "vm_size": {"key": "vmSize", "type": "str"},
+ "subnet": {"key": "subnet", "type": "ResourceId"},
+ "application_sharing_policy": {"key": "applicationSharingPolicy", "type": "str"},
+ "autologger_settings": {"key": "autologgerSettings", "type": "ComputeInstanceAutologgerSettings"},
+ "ssh_settings": {"key": "sshSettings", "type": "ComputeInstanceSshSettings"},
+ "custom_services": {"key": "customServices", "type": "[CustomService]"},
+ "os_image_metadata": {"key": "osImageMetadata", "type": "ImageMetadata"},
+ "connectivity_endpoints": {"key": "connectivityEndpoints", "type": "ComputeInstanceConnectivityEndpoints"},
+ "applications": {"key": "applications", "type": "[ComputeInstanceApplication]"},
+ "created_by": {"key": "createdBy", "type": "ComputeInstanceCreatedBy"},
+ "errors": {"key": "errors", "type": "[ErrorResponse]"},
+ "state": {"key": "state", "type": "str"},
+ "compute_instance_authorization_type": {"key": "computeInstanceAuthorizationType", "type": "str"},
+ "enable_os_patching": {"key": "enableOSPatching", "type": "bool"},
+ "enable_root_access": {"key": "enableRootAccess", "type": "bool"},
+ "enable_sso": {"key": "enableSSO", "type": "bool"},
+ "release_quota_on_stop": {"key": "releaseQuotaOnStop", "type": "bool"},
+ "personal_compute_instance_settings": {
+ "key": "personalComputeInstanceSettings",
+ "type": "PersonalComputeInstanceSettings",
+ },
+ "setup_scripts": {"key": "setupScripts", "type": "SetupScripts"},
+ "last_operation": {"key": "lastOperation", "type": "ComputeInstanceLastOperation"},
+ "schedules": {"key": "schedules", "type": "ComputeSchedules"},
+ "idle_time_before_shutdown": {"key": "idleTimeBeforeShutdown", "type": "str"},
+ "enable_node_public_ip": {"key": "enableNodePublicIp", "type": "bool"},
+ "containers": {"key": "containers", "type": "[ComputeInstanceContainer]"},
+ "data_disks": {"key": "dataDisks", "type": "[ComputeInstanceDataDisk]"},
+ "data_mounts": {"key": "dataMounts", "type": "[ComputeInstanceDataMount]"},
+ "versions": {"key": "versions", "type": "ComputeInstanceVersion"},
}
- def __init__(
+ def __init__( # pylint: disable=too-many-locals
self,
*,
- uri: str,
- description: Optional[str] = None,
- mode: Optional[Union[str, "_models.InputDeliveryMode"]] = None,
+ vm_size: Optional[str] = None,
+ subnet: Optional["_models.ResourceId"] = None,
+ application_sharing_policy: Union[str, "_models.ApplicationSharingPolicy"] = "Shared",
+ autologger_settings: Optional["_models.ComputeInstanceAutologgerSettings"] = None,
+ ssh_settings: Optional["_models.ComputeInstanceSshSettings"] = None,
+ custom_services: Optional[List["_models.CustomService"]] = None,
+ compute_instance_authorization_type: Union[str, "_models.ComputeInstanceAuthorizationType"] = "personal",
+ enable_os_patching: bool = False,
+ enable_root_access: bool = True,
+ enable_sso: bool = True,
+ release_quota_on_stop: bool = False,
+ personal_compute_instance_settings: Optional["_models.PersonalComputeInstanceSettings"] = None,
+ setup_scripts: Optional["_models.SetupScripts"] = None,
+ schedules: Optional["_models.ComputeSchedules"] = None,
+ idle_time_before_shutdown: Optional[str] = None,
+ enable_node_public_ip: bool = True,
**kwargs: Any
) -> None:
"""
- :keyword description: Description for the input.
- :paramtype description: str
- :keyword mode: Input Asset Delivery Mode. Known values are: "ReadOnlyMount", "ReadWriteMount",
- "Download", "Direct", "EvalMount", and "EvalDownload".
- :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
- :keyword uri: [Required] Input Asset URI. Required.
- :paramtype uri: str
+ :keyword vm_size: Virtual Machine Size.
+ :paramtype vm_size: str
+ :keyword subnet: Virtual network subnet resource ID the compute nodes belong to.
+ :paramtype subnet: ~azure.mgmt.machinelearningservices.models.ResourceId
+ :keyword application_sharing_policy: Policy for sharing applications on this compute instance
+ among users of parent workspace. If Personal, only the creator can access applications on this
+ compute instance. When Shared, any workspace user can access applications on this instance
+ depending on his/her assigned role. Known values are: "Personal" and "Shared".
+ :paramtype application_sharing_policy: str or
+ ~azure.mgmt.machinelearningservices.models.ApplicationSharingPolicy
+ :keyword autologger_settings: Specifies settings for autologger.
+ :paramtype autologger_settings:
+ ~azure.mgmt.machinelearningservices.models.ComputeInstanceAutologgerSettings
+ :keyword ssh_settings: Specifies policy and settings for SSH access.
+ :paramtype ssh_settings: ~azure.mgmt.machinelearningservices.models.ComputeInstanceSshSettings
+ :keyword custom_services: List of Custom Services added to the compute.
+ :paramtype custom_services: list[~azure.mgmt.machinelearningservices.models.CustomService]
+ :keyword compute_instance_authorization_type: The Compute Instance Authorization type.
+ Available values are personal (default). "personal"
+ :paramtype compute_instance_authorization_type: str or
+ ~azure.mgmt.machinelearningservices.models.ComputeInstanceAuthorizationType
+ :keyword enable_os_patching: Enable Auto OS Patching. Possible values are: true, false.
+ :paramtype enable_os_patching: bool
+ :keyword enable_root_access: Enable root access. Possible values are: true, false.
+ :paramtype enable_root_access: bool
+ :keyword enable_sso: Enable SSO (single sign on). Possible values are: true, false.
+ :paramtype enable_sso: bool
+ :keyword release_quota_on_stop: Release quota if compute instance stopped. Possible values are:
+ true - release quota if compute instance stopped. false - don't release quota when compute
+ instance stopped.
+ :paramtype release_quota_on_stop: bool
+ :keyword personal_compute_instance_settings: Settings for a personal compute instance.
+ :paramtype personal_compute_instance_settings:
+ ~azure.mgmt.machinelearningservices.models.PersonalComputeInstanceSettings
+ :keyword setup_scripts: Details of customized scripts to execute for setting up the cluster.
+ :paramtype setup_scripts: ~azure.mgmt.machinelearningservices.models.SetupScripts
+ :keyword schedules: The list of schedules to be applied on the computes.
+ :paramtype schedules: ~azure.mgmt.machinelearningservices.models.ComputeSchedules
+ :keyword idle_time_before_shutdown: Stops the compute instance after a user-defined period of
+ inactivity. Time is defined in ISO8601 format. Minimum is 15 min, maximum is 3 days.
+ :paramtype idle_time_before_shutdown: str
+ :keyword enable_node_public_ip: Enable or disable node public IP address provisioning. Possible
+ values are: true - Indicates that the compute nodes will have public IPs provisioned. false -
+ Indicates that the compute nodes will have a private endpoint and no public IPs.
+ :paramtype enable_node_public_ip: bool
"""
- super().__init__(mode=mode, uri=uri, description=description, **kwargs)
- self.description = description
- self.job_input_type: str = "custom_model"
- self.mode = mode
- self.uri = uri
-
+ super().__init__(**kwargs)
+ self.vm_size = vm_size
+ self.subnet = subnet
+ self.application_sharing_policy = application_sharing_policy
+ self.autologger_settings = autologger_settings
+ self.ssh_settings = ssh_settings
+ self.custom_services = custom_services
+ self.os_image_metadata = None
+ self.connectivity_endpoints = None
+ self.applications = None
+ self.created_by = None
+ self.errors = None
+ self.state = None
+ self.compute_instance_authorization_type = compute_instance_authorization_type
+ self.enable_os_patching = enable_os_patching
+ self.enable_root_access = enable_root_access
+ self.enable_sso = enable_sso
+ self.release_quota_on_stop = release_quota_on_stop
+ self.personal_compute_instance_settings = personal_compute_instance_settings
+ self.setup_scripts = setup_scripts
+ self.last_operation = None
+ self.schedules = schedules
+ self.idle_time_before_shutdown = idle_time_before_shutdown
+ self.enable_node_public_ip = enable_node_public_ip
+ self.containers = None
+ self.data_disks = None
+ self.data_mounts = None
+ self.versions = None
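
A sketch of how the client-settable parts of ComputeInstanceProperties fit together (illustrative only, not part of the generated diff; the VM size and idle-time values are assumptions, and server-populated fields such as state and connectivity_endpoints are left to the service):

from azure.mgmt.machinelearningservices import models

instance_properties = models.ComputeInstanceProperties(
    vm_size="STANDARD_DS3_V2",           # example VM size
    idle_time_before_shutdown="PT30M",   # ISO 8601 duration, between 15 minutes and 3 days
    enable_node_public_ip=True,
    ssh_settings=models.ComputeInstanceSshSettings(ssh_public_access="Disabled"),
    autologger_settings=models.ComputeInstanceAutologgerSettings(mlflow_autologger="Enabled"),
)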
-class JobOutput(_serialization.Model):
- """Job output definition container information on where to find job output/logs.
- You probably want to use the sub-classes and not this class directly. Known sub-classes are:
- CustomModelJobOutput, MLFlowModelJobOutput, MLTableJobOutput, TritonModelJobOutput,
- UriFileJobOutput, UriFolderJobOutput
+class ComputeInstanceSshSettings(_serialization.Model):
+ """Specifies policy and settings for SSH access.
- All required parameters must be populated in order to send to Azure.
+ Variables are only populated by the server, and will be ignored when sending a request.
- :ivar description: Description for the output.
- :vartype description: str
- :ivar job_output_type: [Required] Specifies the type of job. Required. Known values are:
- "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and "triton_model".
- :vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
+ :ivar ssh_public_access: State of the public SSH port. Possible values are: Disabled -
+ Indicates that the public ssh port is closed on this instance. Enabled - Indicates that the
+ public ssh port is open and accessible according to the VNet/subnet policy if applicable. Known
+ values are: "Enabled" and "Disabled".
+ :vartype ssh_public_access: str or ~azure.mgmt.machinelearningservices.models.SshPublicAccess
+ :ivar admin_user_name: Describes the admin user name.
+ :vartype admin_user_name: str
+ :ivar ssh_port: Describes the port for connecting through SSH.
+ :vartype ssh_port: int
+ :ivar admin_public_key: Specifies the SSH rsa public key file as a string. Use "ssh-keygen -t
+ rsa -b 2048" to generate your SSH key pairs.
+ :vartype admin_public_key: str
"""
_validation = {
- "job_output_type": {"required": True},
+ "admin_user_name": {"readonly": True},
+ "ssh_port": {"readonly": True},
}
_attribute_map = {
- "description": {"key": "description", "type": "str"},
- "job_output_type": {"key": "jobOutputType", "type": "str"},
- }
-
- _subtype_map = {
- "job_output_type": {
- "custom_model": "CustomModelJobOutput",
- "mlflow_model": "MLFlowModelJobOutput",
- "mltable": "MLTableJobOutput",
- "triton_model": "TritonModelJobOutput",
- "uri_file": "UriFileJobOutput",
- "uri_folder": "UriFolderJobOutput",
- }
+ "ssh_public_access": {"key": "sshPublicAccess", "type": "str"},
+ "admin_user_name": {"key": "adminUserName", "type": "str"},
+ "ssh_port": {"key": "sshPort", "type": "int"},
+ "admin_public_key": {"key": "adminPublicKey", "type": "str"},
}
- def __init__(self, *, description: Optional[str] = None, **kwargs: Any) -> None:
- """
- :keyword description: Description for the output.
- :paramtype description: str
+ def __init__(
+ self,
+ *,
+ ssh_public_access: Union[str, "_models.SshPublicAccess"] = "Disabled",
+ admin_public_key: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
"""
- super().__init__(**kwargs)
- self.description = description
- self.job_output_type: Optional[str] = None
-
-
-class CustomModelJobOutput(AssetJobOutput, JobOutput):
- """CustomModelJobOutput.
+ :keyword ssh_public_access: State of the public SSH port. Possible values are: Disabled -
+ Indicates that the public ssh port is closed on this instance. Enabled - Indicates that the
+ public ssh port is open and accessible according to the VNet/subnet policy if applicable. Known
+ values are: "Enabled" and "Disabled".
+ :paramtype ssh_public_access: str or ~azure.mgmt.machinelearningservices.models.SshPublicAccess
+ :keyword admin_public_key: Specifies the SSH rsa public key file as a string. Use "ssh-keygen
+ -t rsa -b 2048" to generate your SSH key pairs.
+ :paramtype admin_public_key: str
+ """
+ super().__init__(**kwargs)
+ self.ssh_public_access = ssh_public_access
+ self.admin_user_name = None
+ self.ssh_port = None
+ self.admin_public_key = admin_public_key
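
As an illustration (not part of the generated diff), enabling SSH on a new compute instance only involves the two writable fields; admin_user_name and ssh_port are read-only and filled in by the service. The key string below is a placeholder:

from azure.mgmt.machinelearningservices import models

ssh_settings = models.ComputeInstanceSshSettings(
    ssh_public_access="Enabled",                        # known values: "Enabled", "Disabled"
    admin_public_key="ssh-rsa AAAAB3... generated-key", # placeholder public key
)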
- All required parameters must be populated in order to send to Azure.
- :ivar description: Description for the output.
- :vartype description: str
- :ivar job_output_type: [Required] Specifies the type of job. Required. Known values are:
- "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and "triton_model".
- :vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
- :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
- :vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
- :ivar uri: Output Asset URI.
- :vartype uri: str
- """
+class ComputeInstanceVersion(_serialization.Model):
+ """Version of computeInstance.
- _validation = {
- "job_output_type": {"required": True},
- }
+ :ivar runtime: Runtime of compute instance.
+ :vartype runtime: str
+ """
_attribute_map = {
- "description": {"key": "description", "type": "str"},
- "job_output_type": {"key": "jobOutputType", "type": "str"},
- "mode": {"key": "mode", "type": "str"},
- "uri": {"key": "uri", "type": "str"},
+ "runtime": {"key": "runtime", "type": "str"},
}
- def __init__(
- self,
- *,
- description: Optional[str] = None,
- mode: Optional[Union[str, "_models.OutputDeliveryMode"]] = None,
- uri: Optional[str] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, runtime: Optional[str] = None, **kwargs: Any) -> None:
"""
- :keyword description: Description for the output.
- :paramtype description: str
- :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
- :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
- :keyword uri: Output Asset URI.
- :paramtype uri: str
+ :keyword runtime: Runtime of compute instance.
+ :paramtype runtime: str
"""
- super().__init__(mode=mode, uri=uri, description=description, **kwargs)
- self.description = description
- self.job_output_type: str = "custom_model"
- self.mode = mode
- self.uri = uri
+ super().__init__(**kwargs)
+ self.runtime = runtime
-class CustomNCrossValidations(NCrossValidations):
- """N-Cross validations are specified by user.
+class ComputeRecurrenceSchedule(_serialization.Model):
+ """ComputeRecurrenceSchedule.
All required parameters must be populated in order to send to Azure.
- :ivar mode: [Required] Mode for determining N-Cross validations. Required. Known values are:
- "Auto" and "Custom".
- :vartype mode: str or ~azure.mgmt.machinelearningservices.models.NCrossValidationsMode
- :ivar value: [Required] N-Cross validations value. Required.
- :vartype value: int
+ :ivar hours: [Required] List of hours for the schedule. Required.
+ :vartype hours: list[int]
+ :ivar minutes: [Required] List of minutes for the schedule. Required.
+ :vartype minutes: list[int]
+ :ivar month_days: List of month days for the schedule.
+ :vartype month_days: list[int]
+ :ivar week_days: List of days for the schedule.
+ :vartype week_days: list[str or ~azure.mgmt.machinelearningservices.models.ComputeWeekDay]
"""
_validation = {
- "mode": {"required": True},
- "value": {"required": True},
+ "hours": {"required": True},
+ "minutes": {"required": True},
}
_attribute_map = {
- "mode": {"key": "mode", "type": "str"},
- "value": {"key": "value", "type": "int"},
+ "hours": {"key": "hours", "type": "[int]"},
+ "minutes": {"key": "minutes", "type": "[int]"},
+ "month_days": {"key": "monthDays", "type": "[int]"},
+ "week_days": {"key": "weekDays", "type": "[str]"},
}
- def __init__(self, *, value: int, **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ hours: List[int],
+ minutes: List[int],
+ month_days: Optional[List[int]] = None,
+ week_days: Optional[List[Union[str, "_models.ComputeWeekDay"]]] = None,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword value: [Required] N-Cross validations value. Required.
- :paramtype value: int
+ :keyword hours: [Required] List of hours for the schedule. Required.
+ :paramtype hours: list[int]
+ :keyword minutes: [Required] List of minutes for the schedule. Required.
+ :paramtype minutes: list[int]
+ :keyword month_days: List of month days for the schedule.
+ :paramtype month_days: list[int]
+ :keyword week_days: List of days for the schedule.
+ :paramtype week_days: list[str or ~azure.mgmt.machinelearningservices.models.ComputeWeekDay]
"""
super().__init__(**kwargs)
- self.mode: str = "Custom"
- self.value = value
-
+ self.hours = hours
+ self.minutes = minutes
+ self.month_days = month_days
+ self.week_days = week_days
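
A short sketch of the new ComputeRecurrenceSchedule model (not part of the generated diff; the day-name strings are assumed to match the ComputeWeekDay values, and the times are examples):

from azure.mgmt.machinelearningservices import models

# Fire at 18:00 and 18:30 on weekdays; hours and minutes are the two required lists.
recurrence_schedule = models.ComputeRecurrenceSchedule(
    hours=[18],
    minutes=[0, 30],
    week_days=["Monday", "Tuesday", "Wednesday", "Thursday", "Friday"],
)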
-class CustomSeasonality(Seasonality):
- """CustomSeasonality.
- All required parameters must be populated in order to send to Azure.
+class ComputeResourceSchema(_serialization.Model):
+ """ComputeResourceSchema.
- :ivar mode: [Required] Seasonality mode. Required. Known values are: "Auto" and "Custom".
- :vartype mode: str or ~azure.mgmt.machinelearningservices.models.SeasonalityMode
- :ivar value: [Required] Seasonality value. Required.
- :vartype value: int
+ :ivar properties: Compute properties.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.Compute
"""
- _validation = {
- "mode": {"required": True},
- "value": {"required": True},
- }
-
_attribute_map = {
- "mode": {"key": "mode", "type": "str"},
- "value": {"key": "value", "type": "int"},
+ "properties": {"key": "properties", "type": "Compute"},
}
- def __init__(self, *, value: int, **kwargs: Any) -> None:
+ def __init__(self, *, properties: Optional["_models.Compute"] = None, **kwargs: Any) -> None:
"""
- :keyword value: [Required] Seasonality value. Required.
- :paramtype value: int
+ :keyword properties: Compute properties.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.Compute
"""
super().__init__(**kwargs)
- self.mode: str = "Custom"
- self.value = value
+ self.properties = properties
-class CustomService(_serialization.Model):
- """Specifies the custom service configuration.
+class ComputeResource(Resource, ComputeResourceSchema):
+ """Machine Learning compute object wrapped into ARM resource envelope.
- :ivar additional_properties: Unmatched properties from the message are deserialized to this
- collection.
- :vartype additional_properties: dict[str, any]
- :ivar name: Name of the Custom Service.
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar properties: Compute properties.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.Compute
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ :vartype id: str
+ :ivar name: The name of the resource.
:vartype name: str
- :ivar image: Describes the Image Specifications.
- :vartype image: ~azure.mgmt.machinelearningservices.models.Image
- :ivar environment_variables: Environment Variable for the container.
- :vartype environment_variables: dict[str,
- ~azure.mgmt.machinelearningservices.models.EnvironmentVariable]
- :ivar docker: Describes the docker settings for the image.
- :vartype docker: ~azure.mgmt.machinelearningservices.models.Docker
- :ivar endpoints: Configuring the endpoints for the container.
- :vartype endpoints: list[~azure.mgmt.machinelearningservices.models.Endpoint]
- :ivar volumes: Configuring the volumes for the container.
- :vartype volumes: list[~azure.mgmt.machinelearningservices.models.VolumeDefinition]
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar identity: The identity of the resource.
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :ivar location: Specifies the location of the resource.
+ :vartype location: str
+ :ivar tags: Contains resource tags defined as key/value pairs.
+ :vartype tags: dict[str, str]
+ :ivar sku: The sku of the workspace.
+ :vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ }
+
_attribute_map = {
- "additional_properties": {"key": "", "type": "{object}"},
+ "properties": {"key": "properties", "type": "Compute"},
+ "id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
- "image": {"key": "image", "type": "Image"},
- "environment_variables": {"key": "environmentVariables", "type": "{EnvironmentVariable}"},
- "docker": {"key": "docker", "type": "Docker"},
- "endpoints": {"key": "endpoints", "type": "[Endpoint]"},
- "volumes": {"key": "volumes", "type": "[VolumeDefinition]"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
+ "location": {"key": "location", "type": "str"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "sku": {"key": "sku", "type": "Sku"},
}
def __init__(
self,
*,
- additional_properties: Optional[Dict[str, Any]] = None,
- name: Optional[str] = None,
- image: Optional["_models.Image"] = None,
- environment_variables: Optional[Dict[str, "_models.EnvironmentVariable"]] = None,
- docker: Optional["_models.Docker"] = None,
- endpoints: Optional[List["_models.Endpoint"]] = None,
- volumes: Optional[List["_models.VolumeDefinition"]] = None,
+ properties: Optional["_models.Compute"] = None,
+ identity: Optional["_models.ManagedServiceIdentity"] = None,
+ location: Optional[str] = None,
+ tags: Optional[Dict[str, str]] = None,
+ sku: Optional["_models.Sku"] = None,
**kwargs: Any
) -> None:
"""
- :keyword additional_properties: Unmatched properties from the message are deserialized to this
- collection.
- :paramtype additional_properties: dict[str, any]
- :keyword name: Name of the Custom Service.
- :paramtype name: str
- :keyword image: Describes the Image Specifications.
- :paramtype image: ~azure.mgmt.machinelearningservices.models.Image
- :keyword environment_variables: Environment Variable for the container.
- :paramtype environment_variables: dict[str,
- ~azure.mgmt.machinelearningservices.models.EnvironmentVariable]
- :keyword docker: Describes the docker settings for the image.
- :paramtype docker: ~azure.mgmt.machinelearningservices.models.Docker
- :keyword endpoints: Configuring the endpoints for the container.
- :paramtype endpoints: list[~azure.mgmt.machinelearningservices.models.Endpoint]
- :keyword volumes: Configuring the volumes for the container.
- :paramtype volumes: list[~azure.mgmt.machinelearningservices.models.VolumeDefinition]
+ :keyword properties: Compute properties.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.Compute
+ :keyword identity: The identity of the resource.
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :keyword location: Specifies the location of the resource.
+ :paramtype location: str
+ :keyword tags: Contains resource tags defined as key/value pairs.
+ :paramtype tags: dict[str, str]
+ :keyword sku: The sku of the workspace.
+ :paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
- super().__init__(**kwargs)
- self.additional_properties = additional_properties
- self.name = name
- self.image = image
- self.environment_variables = environment_variables
- self.docker = docker
- self.endpoints = endpoints
- self.volumes = volumes
+ super().__init__(properties=properties, **kwargs)
+ self.properties = properties
+ self.identity = identity
+ self.location = location
+ self.tags = tags
+ self.sku = sku
+ self.id = None
+ self.name = None
+ self.type = None
+ self.system_data = None
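# --- Editor's note: illustrative usage sketch, not part of this diff. It builds the ARM
# envelope above using only the parameters shown; the nested Compute payload (e.g. an
# AmlCompute definition) and the management-plane call that would consume this object are
# assumptions and are therefore omitted or left as None.
from azure.mgmt.machinelearningservices import models

compute_resource = models.ComputeResource(
    location="eastus",                # region of the wrapped compute
    tags={"team": "ml-platform"},     # ordinary ARM resource tags
    properties=None,                  # normally a concrete Compute subclass goes here
)
print(compute_resource.name)          # None (read-only, populated by the service)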
-class CustomTargetLags(TargetLags):
- """CustomTargetLags.
-
- All required parameters must be populated in order to send to Azure.
-
- :ivar mode: [Required] Set target lags mode - Auto/Custom. Required. Known values are: "Auto"
- and "Custom".
- :vartype mode: str or ~azure.mgmt.machinelearningservices.models.TargetLagsMode
- :ivar values: [Required] Set target lags values. Required.
- :vartype values: list[int]
- """
-
- _validation = {
- "mode": {"required": True},
- "values": {"required": True},
- }
-
- _attribute_map = {
- "mode": {"key": "mode", "type": "str"},
- "values": {"key": "values", "type": "[int]"},
- }
-
- def __init__(self, *, values: List[int], **kwargs: Any) -> None:
- """
- :keyword values: [Required] Set target lags values. Required.
- :paramtype values: list[int]
- """
- super().__init__(**kwargs)
- self.mode: str = "Custom"
- self.values = values
-
-
-class CustomTargetRollingWindowSize(TargetRollingWindowSize):
- """CustomTargetRollingWindowSize.
-
- All required parameters must be populated in order to send to Azure.
+class ComputeRuntimeDto(_serialization.Model):
+ """ComputeRuntimeDto.
- :ivar mode: [Required] TargetRollingWindowSiz detection mode. Required. Known values are:
- "Auto" and "Custom".
- :vartype mode: str or ~azure.mgmt.machinelearningservices.models.TargetRollingWindowSizeMode
- :ivar value: [Required] TargetRollingWindowSize value. Required.
- :vartype value: int
+ :ivar spark_runtime_version:
+ :vartype spark_runtime_version: str
"""
- _validation = {
- "mode": {"required": True},
- "value": {"required": True},
- }
-
_attribute_map = {
- "mode": {"key": "mode", "type": "str"},
- "value": {"key": "value", "type": "int"},
+ "spark_runtime_version": {"key": "sparkRuntimeVersion", "type": "str"},
}
- def __init__(self, *, value: int, **kwargs: Any) -> None:
+ def __init__(self, *, spark_runtime_version: Optional[str] = None, **kwargs: Any) -> None:
"""
- :keyword value: [Required] TargetRollingWindowSize value. Required.
- :paramtype value: int
+ :keyword spark_runtime_version:
+ :paramtype spark_runtime_version: str
"""
super().__init__(**kwargs)
- self.mode: str = "Custom"
- self.value = value
+ self.spark_runtime_version = spark_runtime_version
-class DatabricksSchema(_serialization.Model):
- """DatabricksSchema.
+class ComputeSchedules(_serialization.Model):
+ """The list of schedules to be applied on the computes.
- :ivar properties: Properties of Databricks.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.DatabricksProperties
+ :ivar compute_start_stop: The list of compute start stop schedules to be applied.
+ :vartype compute_start_stop:
+ list[~azure.mgmt.machinelearningservices.models.ComputeStartStopSchedule]
"""
_attribute_map = {
- "properties": {"key": "properties", "type": "DatabricksProperties"},
+ "compute_start_stop": {"key": "computeStartStop", "type": "[ComputeStartStopSchedule]"},
}
- def __init__(self, *, properties: Optional["_models.DatabricksProperties"] = None, **kwargs: Any) -> None:
+ def __init__(
+ self, *, compute_start_stop: Optional[List["_models.ComputeStartStopSchedule"]] = None, **kwargs: Any
+ ) -> None:
"""
- :keyword properties: Properties of Databricks.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.DatabricksProperties
+ :keyword compute_start_stop: The list of compute start stop schedules to be applied.
+ :paramtype compute_start_stop:
+ list[~azure.mgmt.machinelearningservices.models.ComputeStartStopSchedule]
"""
super().__init__(**kwargs)
- self.properties = properties
+ self.compute_start_stop = compute_start_stop
-class Databricks(Compute, DatabricksSchema): # pylint: disable=too-many-instance-attributes
- """A DataFactory compute.
+class ComputeStartStopSchedule(_serialization.Model):
+ """Compute start stop schedule properties.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
-
- :ivar properties: Properties of Databricks.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.DatabricksProperties
- :ivar compute_type: The type of compute. Required. Known values are: "AKS", "Kubernetes",
- "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight", "Databricks",
- "DataLakeAnalytics", and "SynapseSpark".
- :vartype compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
- :ivar compute_location: Location for the underlying compute.
- :vartype compute_location: str
- :ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
- Updating, Provisioning, Succeeded, and Failed. Known values are: "Unknown", "Updating",
- "Creating", "Deleting", "Succeeded", "Failed", and "Canceled".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.ProvisioningState
- :ivar description: The description of the Machine Learning compute.
- :vartype description: str
- :ivar created_on: The time at which the compute was created.
- :vartype created_on: ~datetime.datetime
- :ivar modified_on: The time at which the compute was last modified.
- :vartype modified_on: ~datetime.datetime
- :ivar resource_id: ARM resource id of the underlying compute.
- :vartype resource_id: str
- :ivar provisioning_errors: Errors during provisioning.
- :vartype provisioning_errors: list[~azure.mgmt.machinelearningservices.models.ErrorResponse]
- :ivar is_attached_compute: Indicating whether the compute was provisioned by user and brought
- from outside if true, or machine learning service provisioned it if false.
- :vartype is_attached_compute: bool
- :ivar disable_local_auth: Opt-out of local authentication and ensure customers can use only MSI
- and AAD exclusively for authentication.
- :vartype disable_local_auth: bool
+ :ivar id: A system assigned id for the schedule.
+ :vartype id: str
+ :ivar provisioning_status: The current deployment state of schedule. Known values are:
+ "Completed", "Provisioning", and "Failed".
+ :vartype provisioning_status: str or
+ ~azure.mgmt.machinelearningservices.models.ProvisioningStatus
+ :ivar status: Is the schedule enabled or disabled? Known values are: "Enabled" and "Disabled".
+ :vartype status: str or ~azure.mgmt.machinelearningservices.models.ScheduleStatus
+ :ivar action: [Required] The compute power action. Known values are: "Start" and "Stop".
+ :vartype action: str or ~azure.mgmt.machinelearningservices.models.ComputePowerAction
+ :ivar trigger_type: [Required] The schedule trigger type. Known values are: "Recurrence" and
+ "Cron".
+ :vartype trigger_type: str or ~azure.mgmt.machinelearningservices.models.ComputeTriggerType
+ :ivar recurrence: Required if triggerType is Recurrence.
+ :vartype recurrence: ~azure.mgmt.machinelearningservices.models.Recurrence
+ :ivar cron: Required if triggerType is Cron.
+ :vartype cron: ~azure.mgmt.machinelearningservices.models.Cron
+ :ivar schedule: [Deprecated] Not used any more.
+ :vartype schedule: ~azure.mgmt.machinelearningservices.models.ScheduleBase
"""
_validation = {
- "compute_type": {"required": True},
- "provisioning_state": {"readonly": True},
- "created_on": {"readonly": True},
- "modified_on": {"readonly": True},
- "provisioning_errors": {"readonly": True},
- "is_attached_compute": {"readonly": True},
+ "id": {"readonly": True},
+ "provisioning_status": {"readonly": True},
}
_attribute_map = {
- "properties": {"key": "properties", "type": "DatabricksProperties"},
- "compute_type": {"key": "computeType", "type": "str"},
- "compute_location": {"key": "computeLocation", "type": "str"},
- "provisioning_state": {"key": "provisioningState", "type": "str"},
- "description": {"key": "description", "type": "str"},
- "created_on": {"key": "createdOn", "type": "iso-8601"},
- "modified_on": {"key": "modifiedOn", "type": "iso-8601"},
- "resource_id": {"key": "resourceId", "type": "str"},
- "provisioning_errors": {"key": "provisioningErrors", "type": "[ErrorResponse]"},
- "is_attached_compute": {"key": "isAttachedCompute", "type": "bool"},
- "disable_local_auth": {"key": "disableLocalAuth", "type": "bool"},
+ "id": {"key": "id", "type": "str"},
+ "provisioning_status": {"key": "provisioningStatus", "type": "str"},
+ "status": {"key": "status", "type": "str"},
+ "action": {"key": "action", "type": "str"},
+ "trigger_type": {"key": "triggerType", "type": "str"},
+ "recurrence": {"key": "recurrence", "type": "Recurrence"},
+ "cron": {"key": "cron", "type": "Cron"},
+ "schedule": {"key": "schedule", "type": "ScheduleBase"},
}
def __init__(
self,
*,
- properties: Optional["_models.DatabricksProperties"] = None,
- compute_location: Optional[str] = None,
- description: Optional[str] = None,
- resource_id: Optional[str] = None,
- disable_local_auth: Optional[bool] = None,
+ status: Optional[Union[str, "_models.ScheduleStatus"]] = None,
+ action: Optional[Union[str, "_models.ComputePowerAction"]] = None,
+ trigger_type: Optional[Union[str, "_models.ComputeTriggerType"]] = None,
+ recurrence: Optional["_models.Recurrence"] = None,
+ cron: Optional["_models.Cron"] = None,
+ schedule: Optional["_models.ScheduleBase"] = None,
**kwargs: Any
) -> None:
"""
- :keyword properties: Properties of Databricks.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.DatabricksProperties
- :keyword compute_location: Location for the underlying compute.
- :paramtype compute_location: str
- :keyword description: The description of the Machine Learning compute.
- :paramtype description: str
- :keyword resource_id: ARM resource id of the underlying compute.
- :paramtype resource_id: str
- :keyword disable_local_auth: Opt-out of local authentication and ensure customers can use only
- MSI and AAD exclusively for authentication.
- :paramtype disable_local_auth: bool
+ :keyword status: Is the schedule enabled or disabled? Known values are: "Enabled" and
+ "Disabled".
+ :paramtype status: str or ~azure.mgmt.machinelearningservices.models.ScheduleStatus
+ :keyword action: [Required] The compute power action. Known values are: "Start" and "Stop".
+ :paramtype action: str or ~azure.mgmt.machinelearningservices.models.ComputePowerAction
+ :keyword trigger_type: [Required] The schedule trigger type. Known values are: "Recurrence" and
+ "Cron".
+ :paramtype trigger_type: str or ~azure.mgmt.machinelearningservices.models.ComputeTriggerType
+ :keyword recurrence: Required if triggerType is Recurrence.
+ :paramtype recurrence: ~azure.mgmt.machinelearningservices.models.Recurrence
+ :keyword cron: Required if triggerType is Cron.
+ :paramtype cron: ~azure.mgmt.machinelearningservices.models.Cron
+ :keyword schedule: [Deprecated] Not used any more.
+ :paramtype schedule: ~azure.mgmt.machinelearningservices.models.ScheduleBase
"""
- super().__init__(
- compute_location=compute_location,
- description=description,
- resource_id=resource_id,
- disable_local_auth=disable_local_auth,
- properties=properties,
- **kwargs
- )
- self.properties = properties
- self.compute_type: str = "Databricks"
- self.compute_location = compute_location
- self.provisioning_state = None
- self.description = description
- self.created_on = None
- self.modified_on = None
- self.resource_id = resource_id
- self.provisioning_errors = None
- self.is_attached_compute = None
- self.disable_local_auth = disable_local_auth
+ super().__init__(**kwargs)
+ self.id = None
+ self.provisioning_status = None
+ self.status = status
+ self.action = action
+ self.trigger_type = trigger_type
+ self.recurrence = recurrence
+ self.cron = cron
+ self.schedule = schedule
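# --- Editor's note: illustrative sketch, not part of this diff. A cron-based stop schedule
# wrapped in ComputeSchedules; the enum values ("Enabled", "Stop", "Cron") come from the
# docstrings above, and the Cron and ComputeSchedules models are defined elsewhere in this file.
from azure.mgmt.machinelearningservices import models

stop_schedule = models.ComputeStartStopSchedule(
    status="Enabled",
    action="Stop",
    trigger_type="Cron",
    cron=models.Cron(expression="0 22 * * 1-5", time_zone="UTC"),  # weekdays at 22:00
)
schedules = models.ComputeSchedules(compute_start_stop=[stop_schedule])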
-class DatabricksComputeSecretsProperties(_serialization.Model):
- """Properties of Databricks Compute Secrets.
+class ContainerResourceRequirements(_serialization.Model):
+ """Resource requirements for each container instance within an online deployment.
- :ivar databricks_access_token: access token for databricks account.
- :vartype databricks_access_token: str
+ :ivar container_resource_limits: Container resource limit info.
+ :vartype container_resource_limits:
+ ~azure.mgmt.machinelearningservices.models.ContainerResourceSettings
+ :ivar container_resource_requests: Container resource request info.
+ :vartype container_resource_requests:
+ ~azure.mgmt.machinelearningservices.models.ContainerResourceSettings
"""
_attribute_map = {
- "databricks_access_token": {"key": "databricksAccessToken", "type": "str"},
+ "container_resource_limits": {"key": "containerResourceLimits", "type": "ContainerResourceSettings"},
+ "container_resource_requests": {"key": "containerResourceRequests", "type": "ContainerResourceSettings"},
}
- def __init__(self, *, databricks_access_token: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ container_resource_limits: Optional["_models.ContainerResourceSettings"] = None,
+ container_resource_requests: Optional["_models.ContainerResourceSettings"] = None,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword databricks_access_token: access token for databricks account.
- :paramtype databricks_access_token: str
+ :keyword container_resource_limits: Container resource limit info.
+ :paramtype container_resource_limits:
+ ~azure.mgmt.machinelearningservices.models.ContainerResourceSettings
+ :keyword container_resource_requests: Container resource request info.
+ :paramtype container_resource_requests:
+ ~azure.mgmt.machinelearningservices.models.ContainerResourceSettings
"""
super().__init__(**kwargs)
- self.databricks_access_token = databricks_access_token
+ self.container_resource_limits = container_resource_limits
+ self.container_resource_requests = container_resource_requests
-class DatabricksComputeSecrets(ComputeSecrets, DatabricksComputeSecretsProperties):
- """Secrets related to a Machine Learning compute based on Databricks.
+class ContainerResourceSettings(_serialization.Model):
+ """ContainerResourceSettings.
- All required parameters must be populated in order to send to Azure.
-
- :ivar databricks_access_token: access token for databricks account.
- :vartype databricks_access_token: str
- :ivar compute_type: The type of compute. Required. Known values are: "AKS", "Kubernetes",
- "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight", "Databricks",
- "DataLakeAnalytics", and "SynapseSpark".
- :vartype compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
+ :ivar cpu: Number of vCPUs request/limit for container. More info:
+ https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/.
+ :vartype cpu: str
+ :ivar gpu: Number of Nvidia GPU cards request/limit for container. More info:
+ https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/.
+ :vartype gpu: str
+ :ivar memory: Memory size request/limit for container. More info:
+ https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/.
+ :vartype memory: str
"""
- _validation = {
- "compute_type": {"required": True},
- }
-
_attribute_map = {
- "databricks_access_token": {"key": "databricksAccessToken", "type": "str"},
- "compute_type": {"key": "computeType", "type": "str"},
+ "cpu": {"key": "cpu", "type": "str"},
+ "gpu": {"key": "gpu", "type": "str"},
+ "memory": {"key": "memory", "type": "str"},
}
- def __init__(self, *, databricks_access_token: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(
+ self, *, cpu: Optional[str] = None, gpu: Optional[str] = None, memory: Optional[str] = None, **kwargs: Any
+ ) -> None:
"""
- :keyword databricks_access_token: access token for databricks account.
- :paramtype databricks_access_token: str
+ :keyword cpu: Number of vCPUs request/limit for container. More info:
+ https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/.
+ :paramtype cpu: str
+ :keyword gpu: Number of Nvidia GPU cards request/limit for container. More info:
+ https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/.
+ :paramtype gpu: str
+ :keyword memory: Memory size request/limit for container. More info:
+ https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/.
+ :paramtype memory: str
"""
- super().__init__(databricks_access_token=databricks_access_token, **kwargs)
- self.databricks_access_token = databricks_access_token
- self.compute_type: str = "Databricks"
+ super().__init__(**kwargs)
+ self.cpu = cpu
+ self.gpu = gpu
+ self.memory = memory
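# --- Editor's note: illustrative sketch, not part of this diff. Kubernetes-style
# request/limit pairs for an online-deployment container; values are strings, matching the
# fields shown above.
from azure.mgmt.machinelearningservices import models

requirements = models.ContainerResourceRequirements(
    container_resource_requests=models.ContainerResourceSettings(cpu="0.5", memory="1Gi"),
    container_resource_limits=models.ContainerResourceSettings(cpu="1", memory="2Gi", gpu="0"),
)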
-class DatabricksProperties(_serialization.Model):
- """Properties of Databricks.
+class CosmosDbSettings(_serialization.Model):
+ """CosmosDbSettings.
- :ivar databricks_access_token: Databricks access token.
- :vartype databricks_access_token: str
- :ivar workspace_url: Workspace Url.
- :vartype workspace_url: str
+ :ivar collections_throughput:
+ :vartype collections_throughput: int
"""
_attribute_map = {
- "databricks_access_token": {"key": "databricksAccessToken", "type": "str"},
- "workspace_url": {"key": "workspaceUrl", "type": "str"},
+ "collections_throughput": {"key": "collectionsThroughput", "type": "int"},
}
- def __init__(
- self, *, databricks_access_token: Optional[str] = None, workspace_url: Optional[str] = None, **kwargs: Any
- ) -> None:
+ def __init__(self, *, collections_throughput: Optional[int] = None, **kwargs: Any) -> None:
"""
- :keyword databricks_access_token: Databricks access token.
- :paramtype databricks_access_token: str
- :keyword workspace_url: Workspace Url.
- :paramtype workspace_url: str
+ :keyword collections_throughput:
+ :paramtype collections_throughput: int
"""
super().__init__(**kwargs)
- self.databricks_access_token = databricks_access_token
- self.workspace_url = workspace_url
+ self.collections_throughput = collections_throughput
-class DataContainer(Resource):
- """Azure Resource Manager resource envelope.
+class ScheduleActionBase(_serialization.Model):
+ """ScheduleActionBase.
- Variables are only populated by the server, and will be ignored when sending a request.
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ JobScheduleAction, CreateMonitorAction, ImportDataAction, EndpointScheduleAction
All required parameters must be populated in order to send to Azure.
- :ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
- :vartype id: str
- :ivar name: The name of the resource.
- :vartype name: str
- :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
- "Microsoft.Storage/storageAccounts".
- :vartype type: str
- :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
- information.
- :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
- :ivar properties: [Required] Additional attributes of the entity. Required.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.DataContainerProperties
+ :ivar action_type: [Required] Specifies the action type of the schedule. Required. Known values
+ are: "CreateJob", "InvokeBatchEndpoint", "ImportData", and "CreateMonitor".
+ :vartype action_type: str or ~azure.mgmt.machinelearningservices.models.ScheduleActionType
"""
_validation = {
- "id": {"readonly": True},
- "name": {"readonly": True},
- "type": {"readonly": True},
- "system_data": {"readonly": True},
- "properties": {"required": True},
+ "action_type": {"required": True},
}
_attribute_map = {
- "id": {"key": "id", "type": "str"},
- "name": {"key": "name", "type": "str"},
- "type": {"key": "type", "type": "str"},
- "system_data": {"key": "systemData", "type": "SystemData"},
- "properties": {"key": "properties", "type": "DataContainerProperties"},
+ "action_type": {"key": "actionType", "type": "str"},
}
- def __init__(self, *, properties: "_models.DataContainerProperties", **kwargs: Any) -> None:
- """
- :keyword properties: [Required] Additional attributes of the entity. Required.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.DataContainerProperties
- """
- super().__init__(**kwargs)
- self.properties = properties
+ _subtype_map = {
+ "action_type": {
+ "CreateJob": "JobScheduleAction",
+ "CreateMonitor": "CreateMonitorAction",
+ "ImportData": "ImportDataAction",
+ "InvokeBatchEndpoint": "EndpointScheduleAction",
+ }
+ }
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.action_type: Optional[str] = None
-class DataContainerProperties(AssetContainer):
- """Container for data asset versions.
- Variables are only populated by the server, and will be ignored when sending a request.
+class CreateMonitorAction(ScheduleActionBase):
+ """CreateMonitorAction.
All required parameters must be populated in order to send to Azure.
- :ivar description: The asset description text.
- :vartype description: str
- :ivar properties: The asset property dictionary.
- :vartype properties: dict[str, str]
- :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
- :vartype tags: dict[str, str]
- :ivar is_archived: Is the asset archived?.
- :vartype is_archived: bool
- :ivar latest_version: The latest version inside this container.
- :vartype latest_version: str
- :ivar next_version: The next auto incremental version.
- :vartype next_version: str
- :ivar data_type: [Required] Specifies the type of data. Required. Known values are: "uri_file",
- "uri_folder", and "mltable".
- :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.DataType
+ :ivar action_type: [Required] Specifies the action type of the schedule. Required. Known values
+ are: "CreateJob", "InvokeBatchEndpoint", "ImportData", and "CreateMonitor".
+ :vartype action_type: str or ~azure.mgmt.machinelearningservices.models.ScheduleActionType
+ :ivar monitor_definition: [Required] Defines the monitor. Required.
+ :vartype monitor_definition: ~azure.mgmt.machinelearningservices.models.MonitorDefinition
"""
_validation = {
- "latest_version": {"readonly": True},
- "next_version": {"readonly": True},
- "data_type": {"required": True},
+ "action_type": {"required": True},
+ "monitor_definition": {"required": True},
}
_attribute_map = {
- "description": {"key": "description", "type": "str"},
- "properties": {"key": "properties", "type": "{str}"},
- "tags": {"key": "tags", "type": "{str}"},
- "is_archived": {"key": "isArchived", "type": "bool"},
- "latest_version": {"key": "latestVersion", "type": "str"},
- "next_version": {"key": "nextVersion", "type": "str"},
- "data_type": {"key": "dataType", "type": "str"},
+ "action_type": {"key": "actionType", "type": "str"},
+ "monitor_definition": {"key": "monitorDefinition", "type": "MonitorDefinition"},
}
- def __init__(
- self,
- *,
- data_type: Union[str, "_models.DataType"],
- description: Optional[str] = None,
- properties: Optional[Dict[str, str]] = None,
- tags: Optional[Dict[str, str]] = None,
- is_archived: bool = False,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, monitor_definition: "_models.MonitorDefinition", **kwargs: Any) -> None:
"""
- :keyword description: The asset description text.
- :paramtype description: str
- :keyword properties: The asset property dictionary.
- :paramtype properties: dict[str, str]
- :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
- :paramtype tags: dict[str, str]
- :keyword is_archived: Is the asset archived?.
- :paramtype is_archived: bool
- :keyword data_type: [Required] Specifies the type of data. Required. Known values are:
- "uri_file", "uri_folder", and "mltable".
- :paramtype data_type: str or ~azure.mgmt.machinelearningservices.models.DataType
+ :keyword monitor_definition: [Required] Defines the monitor. Required.
+ :paramtype monitor_definition: ~azure.mgmt.machinelearningservices.models.MonitorDefinition
"""
- super().__init__(description=description, properties=properties, tags=tags, is_archived=is_archived, **kwargs)
- self.data_type = data_type
+ super().__init__(**kwargs)
+ self.action_type: str = "CreateMonitor"
+ self.monitor_definition = monitor_definition
-class DataContainerResourceArmPaginatedResult(_serialization.Model):
- """A paginated list of DataContainer entities.
+class Cron(_serialization.Model):
+ """The workflow trigger cron for ComputeStartStop schedule type.
- :ivar next_link: The link to the next page of DataContainer objects. If null, there are no
- additional pages.
- :vartype next_link: str
- :ivar value: An array of objects of type DataContainer.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.DataContainer]
+ :ivar start_time: The start time in yyyy-MM-ddTHH:mm:ss format.
+ :vartype start_time: str
+ :ivar time_zone: Specifies time zone in which the schedule runs.
+ TimeZone should follow Windows time zone format. Refer:
+ https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
+ :vartype time_zone: str
+ :ivar expression: [Required] Specifies cron expression of schedule.
+ The expression should follow NCronTab format.
+ :vartype expression: str
"""
_attribute_map = {
- "next_link": {"key": "nextLink", "type": "str"},
- "value": {"key": "value", "type": "[DataContainer]"},
+ "start_time": {"key": "startTime", "type": "str"},
+ "time_zone": {"key": "timeZone", "type": "str"},
+ "expression": {"key": "expression", "type": "str"},
}
def __init__(
- self, *, next_link: Optional[str] = None, value: Optional[List["_models.DataContainer"]] = None, **kwargs: Any
+ self,
+ *,
+ start_time: Optional[str] = None,
+ time_zone: str = "UTC",
+ expression: Optional[str] = None,
+ **kwargs: Any
) -> None:
"""
- :keyword next_link: The link to the next page of DataContainer objects. If null, there are no
- additional pages.
- :paramtype next_link: str
- :keyword value: An array of objects of type DataContainer.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.DataContainer]
+ :keyword start_time: The start time in yyyy-MM-ddTHH:mm:ss format.
+ :paramtype start_time: str
+ :keyword time_zone: Specifies time zone in which the schedule runs.
+ TimeZone should follow Windows time zone format. Refer:
+ https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
+ :paramtype time_zone: str
+ :keyword expression: [Required] Specifies cron expression of schedule.
+ The expression should follow NCronTab format.
+ :paramtype expression: str
"""
super().__init__(**kwargs)
- self.next_link = next_link
- self.value = value
+ self.start_time = start_time
+ self.time_zone = time_zone
+ self.expression = expression
-class DataFactory(Compute):
- """A DataFactory compute.
+class TriggerBase(_serialization.Model):
+ """TriggerBase.
- Variables are only populated by the server, and will be ignored when sending a request.
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ CronTrigger, RecurrenceTrigger
All required parameters must be populated in order to send to Azure.
- :ivar compute_type: The type of compute. Required. Known values are: "AKS", "Kubernetes",
- "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight", "Databricks",
- "DataLakeAnalytics", and "SynapseSpark".
- :vartype compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
- :ivar compute_location: Location for the underlying compute.
- :vartype compute_location: str
- :ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
- Updating, Provisioning, Succeeded, and Failed. Known values are: "Unknown", "Updating",
- "Creating", "Deleting", "Succeeded", "Failed", and "Canceled".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.ProvisioningState
- :ivar description: The description of the Machine Learning compute.
- :vartype description: str
- :ivar created_on: The time at which the compute was created.
- :vartype created_on: ~datetime.datetime
- :ivar modified_on: The time at which the compute was last modified.
- :vartype modified_on: ~datetime.datetime
- :ivar resource_id: ARM resource id of the underlying compute.
- :vartype resource_id: str
- :ivar provisioning_errors: Errors during provisioning.
- :vartype provisioning_errors: list[~azure.mgmt.machinelearningservices.models.ErrorResponse]
- :ivar is_attached_compute: Indicating whether the compute was provisioned by user and brought
- from outside if true, or machine learning service provisioned it if false.
- :vartype is_attached_compute: bool
- :ivar disable_local_auth: Opt-out of local authentication and ensure customers can use only MSI
- and AAD exclusively for authentication.
- :vartype disable_local_auth: bool
+ :ivar end_time: Specifies end time of schedule in ISO 8601, but without a UTC offset. Refer
+ https://en.wikipedia.org/wiki/ISO_8601.
+ The recommended format is "2022-06-01T00:00:01".
+ If not present, the schedule will run indefinitely.
+ :vartype end_time: str
+ :ivar start_time: Specifies start time of schedule in ISO 8601 format, but without a UTC
+ offset.
+ :vartype start_time: str
+ :ivar time_zone: Specifies time zone in which the schedule runs.
+ TimeZone should follow Windows time zone format. Refer:
+ https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
+ :vartype time_zone: str
+ :ivar trigger_type: [Required] The schedule trigger type. Required. Known values are: "Recurrence" and "Cron".
+ :vartype trigger_type: str or ~azure.mgmt.machinelearningservices.models.TriggerType
"""
_validation = {
- "compute_type": {"required": True},
- "provisioning_state": {"readonly": True},
- "created_on": {"readonly": True},
- "modified_on": {"readonly": True},
- "provisioning_errors": {"readonly": True},
- "is_attached_compute": {"readonly": True},
+ "trigger_type": {"required": True},
}
_attribute_map = {
- "compute_type": {"key": "computeType", "type": "str"},
- "compute_location": {"key": "computeLocation", "type": "str"},
- "provisioning_state": {"key": "provisioningState", "type": "str"},
- "description": {"key": "description", "type": "str"},
- "created_on": {"key": "createdOn", "type": "iso-8601"},
- "modified_on": {"key": "modifiedOn", "type": "iso-8601"},
- "resource_id": {"key": "resourceId", "type": "str"},
- "provisioning_errors": {"key": "provisioningErrors", "type": "[ErrorResponse]"},
- "is_attached_compute": {"key": "isAttachedCompute", "type": "bool"},
- "disable_local_auth": {"key": "disableLocalAuth", "type": "bool"},
+ "end_time": {"key": "endTime", "type": "str"},
+ "start_time": {"key": "startTime", "type": "str"},
+ "time_zone": {"key": "timeZone", "type": "str"},
+ "trigger_type": {"key": "triggerType", "type": "str"},
}
- def __init__(
- self,
- *,
- compute_location: Optional[str] = None,
- description: Optional[str] = None,
- resource_id: Optional[str] = None,
- disable_local_auth: Optional[bool] = None,
- **kwargs: Any
- ) -> None:
- """
- :keyword compute_location: Location for the underlying compute.
- :paramtype compute_location: str
- :keyword description: The description of the Machine Learning compute.
- :paramtype description: str
- :keyword resource_id: ARM resource id of the underlying compute.
- :paramtype resource_id: str
- :keyword disable_local_auth: Opt-out of local authentication and ensure customers can use only
- MSI and AAD exclusively for authentication.
- :paramtype disable_local_auth: bool
- """
- super().__init__(
- compute_location=compute_location,
- description=description,
- resource_id=resource_id,
- disable_local_auth=disable_local_auth,
- **kwargs
- )
- self.compute_type: str = "DataFactory"
-
-
-class DataLakeAnalyticsSchema(_serialization.Model):
- """DataLakeAnalyticsSchema.
-
- :ivar properties:
- :vartype properties:
- ~azure.mgmt.machinelearningservices.models.DataLakeAnalyticsSchemaProperties
- """
-
- _attribute_map = {
- "properties": {"key": "properties", "type": "DataLakeAnalyticsSchemaProperties"},
- }
+ _subtype_map = {"trigger_type": {"Cron": "CronTrigger", "Recurrence": "RecurrenceTrigger"}}
def __init__(
- self, *, properties: Optional["_models.DataLakeAnalyticsSchemaProperties"] = None, **kwargs: Any
+ self, *, end_time: Optional[str] = None, start_time: Optional[str] = None, time_zone: str = "UTC", **kwargs: Any
) -> None:
"""
- :keyword properties:
- :paramtype properties:
- ~azure.mgmt.machinelearningservices.models.DataLakeAnalyticsSchemaProperties
+ :keyword end_time: Specifies end time of schedule in ISO 8601, but without a UTC offset. Refer
+ https://en.wikipedia.org/wiki/ISO_8601.
+ The recommended format is "2022-06-01T00:00:01".
+ If not present, the schedule will run indefinitely.
+ :paramtype end_time: str
+ :keyword start_time: Specifies start time of schedule in ISO 8601 format, but without a UTC
+ offset.
+ :paramtype start_time: str
+ :keyword time_zone: Specifies time zone in which the schedule runs.
+ TimeZone should follow Windows time zone format. Refer:
+ https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
+ :paramtype time_zone: str
"""
super().__init__(**kwargs)
- self.properties = properties
-
+ self.end_time = end_time
+ self.start_time = start_time
+ self.time_zone = time_zone
+ self.trigger_type: Optional[str] = None
-class DataLakeAnalytics(Compute, DataLakeAnalyticsSchema): # pylint: disable=too-many-instance-attributes
- """A DataLakeAnalytics compute.
- Variables are only populated by the server, and will be ignored when sending a request.
+class CronTrigger(TriggerBase):
+ """CronTrigger.
All required parameters must be populated in order to send to Azure.
- :ivar properties:
- :vartype properties:
- ~azure.mgmt.machinelearningservices.models.DataLakeAnalyticsSchemaProperties
- :ivar compute_type: The type of compute. Required. Known values are: "AKS", "Kubernetes",
- "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight", "Databricks",
- "DataLakeAnalytics", and "SynapseSpark".
- :vartype compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
- :ivar compute_location: Location for the underlying compute.
- :vartype compute_location: str
- :ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
- Updating, Provisioning, Succeeded, and Failed. Known values are: "Unknown", "Updating",
- "Creating", "Deleting", "Succeeded", "Failed", and "Canceled".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.ProvisioningState
- :ivar description: The description of the Machine Learning compute.
- :vartype description: str
- :ivar created_on: The time at which the compute was created.
- :vartype created_on: ~datetime.datetime
- :ivar modified_on: The time at which the compute was last modified.
- :vartype modified_on: ~datetime.datetime
- :ivar resource_id: ARM resource id of the underlying compute.
- :vartype resource_id: str
- :ivar provisioning_errors: Errors during provisioning.
- :vartype provisioning_errors: list[~azure.mgmt.machinelearningservices.models.ErrorResponse]
- :ivar is_attached_compute: Indicating whether the compute was provisioned by user and brought
- from outside if true, or machine learning service provisioned it if false.
- :vartype is_attached_compute: bool
- :ivar disable_local_auth: Opt-out of local authentication and ensure customers can use only MSI
- and AAD exclusively for authentication.
- :vartype disable_local_auth: bool
+ :ivar end_time: Specifies end time of schedule in ISO 8601, but without a UTC offset. Refer
+ https://en.wikipedia.org/wiki/ISO_8601.
+ The recommended format is "2022-06-01T00:00:01".
+ If not present, the schedule will run indefinitely.
+ :vartype end_time: str
+ :ivar start_time: Specifies start time of schedule in ISO 8601 format, but without a UTC
+ offset.
+ :vartype start_time: str
+ :ivar time_zone: Specifies time zone in which the schedule runs.
+ TimeZone should follow Windows time zone format. Refer:
+ https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
+ :vartype time_zone: str
+ :ivar trigger_type: [Required] The schedule trigger type. Required. Known values are: "Recurrence" and "Cron".
+ :vartype trigger_type: str or ~azure.mgmt.machinelearningservices.models.TriggerType
+ :ivar expression: [Required] Specifies cron expression of schedule.
+ The expression should follow NCronTab format. Required.
+ :vartype expression: str
"""
_validation = {
- "compute_type": {"required": True},
- "provisioning_state": {"readonly": True},
- "created_on": {"readonly": True},
- "modified_on": {"readonly": True},
- "provisioning_errors": {"readonly": True},
- "is_attached_compute": {"readonly": True},
+ "trigger_type": {"required": True},
+ "expression": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
}
_attribute_map = {
- "properties": {"key": "properties", "type": "DataLakeAnalyticsSchemaProperties"},
- "compute_type": {"key": "computeType", "type": "str"},
- "compute_location": {"key": "computeLocation", "type": "str"},
- "provisioning_state": {"key": "provisioningState", "type": "str"},
- "description": {"key": "description", "type": "str"},
- "created_on": {"key": "createdOn", "type": "iso-8601"},
- "modified_on": {"key": "modifiedOn", "type": "iso-8601"},
- "resource_id": {"key": "resourceId", "type": "str"},
- "provisioning_errors": {"key": "provisioningErrors", "type": "[ErrorResponse]"},
- "is_attached_compute": {"key": "isAttachedCompute", "type": "bool"},
- "disable_local_auth": {"key": "disableLocalAuth", "type": "bool"},
+ "end_time": {"key": "endTime", "type": "str"},
+ "start_time": {"key": "startTime", "type": "str"},
+ "time_zone": {"key": "timeZone", "type": "str"},
+ "trigger_type": {"key": "triggerType", "type": "str"},
+ "expression": {"key": "expression", "type": "str"},
}
def __init__(
self,
*,
- properties: Optional["_models.DataLakeAnalyticsSchemaProperties"] = None,
- compute_location: Optional[str] = None,
- description: Optional[str] = None,
- resource_id: Optional[str] = None,
- disable_local_auth: Optional[bool] = None,
+ expression: str,
+ end_time: Optional[str] = None,
+ start_time: Optional[str] = None,
+ time_zone: str = "UTC",
**kwargs: Any
) -> None:
"""
- :keyword properties:
- :paramtype properties:
- ~azure.mgmt.machinelearningservices.models.DataLakeAnalyticsSchemaProperties
- :keyword compute_location: Location for the underlying compute.
- :paramtype compute_location: str
- :keyword description: The description of the Machine Learning compute.
- :paramtype description: str
- :keyword resource_id: ARM resource id of the underlying compute.
- :paramtype resource_id: str
- :keyword disable_local_auth: Opt-out of local authentication and ensure customers can use only
- MSI and AAD exclusively for authentication.
- :paramtype disable_local_auth: bool
+ :keyword end_time: Specifies end time of schedule in ISO 8601, but without a UTC offset. Refer
+ https://en.wikipedia.org/wiki/ISO_8601.
+ The recommended format is "2022-06-01T00:00:01".
+ If not present, the schedule will run indefinitely.
+ :paramtype end_time: str
+ :keyword start_time: Specifies start time of schedule in ISO 8601 format, but without a UTC
+ offset.
+ :paramtype start_time: str
+ :keyword time_zone: Specifies time zone in which the schedule runs.
+ TimeZone should follow Windows time zone format. Refer:
+ https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
+ :paramtype time_zone: str
+ :keyword expression: [Required] Specifies cron expression of schedule.
+ The expression should follow NCronTab format. Required.
+ :paramtype expression: str
"""
- super().__init__(
- compute_location=compute_location,
- description=description,
- resource_id=resource_id,
- disable_local_auth=disable_local_auth,
- properties=properties,
- **kwargs
- )
- self.properties = properties
- self.compute_type: str = "DataLakeAnalytics"
- self.compute_location = compute_location
- self.provisioning_state = None
- self.description = description
- self.created_on = None
- self.modified_on = None
- self.resource_id = resource_id
- self.provisioning_errors = None
- self.is_attached_compute = None
- self.disable_local_auth = disable_local_auth
+ super().__init__(end_time=end_time, start_time=start_time, time_zone=time_zone, **kwargs)
+ self.trigger_type: str = "Cron"
+ self.expression = expression
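# --- Editor's note: illustrative sketch, not part of this diff. The subclass constructor
# pins the triggerType discriminator to "Cron"; the serialize() call is an assumption about
# the msrest-style _serialization.Model base class (not shown in this diff), which is
# expected to emit the camelCase wire names from _attribute_map.
from azure.mgmt.machinelearningservices import models

trigger = models.CronTrigger(expression="0 9 * * *", start_time="2023-09-01T00:00:00")
print(trigger.trigger_type)  # "Cron"
print(trigger.serialize())   # e.g. {'triggerType': 'Cron', 'timeZone': 'UTC', ...}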
-class DataLakeAnalyticsSchemaProperties(_serialization.Model):
- """DataLakeAnalyticsSchemaProperties.
+class CsvExportSummary(ExportSummary):
+ """CsvExportSummary.
- :ivar data_lake_store_account_name: DataLake Store Account Name.
- :vartype data_lake_store_account_name: str
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar end_date_time: The time when the export was completed.
+ :vartype end_date_time: ~datetime.datetime
+ :ivar exported_row_count: The total number of labeled datapoints exported.
+ :vartype exported_row_count: int
+ :ivar format: [Required] The format of exported labels, which also serves as the discriminator. Required.
+ Known values are: "Dataset", "Coco", and "CSV".
+ :vartype format: str or ~azure.mgmt.machinelearningservices.models.ExportFormatType
+ :ivar labeling_job_id: Name and identifier of the job containing exported labels.
+ :vartype labeling_job_id: str
+ :ivar start_date_time: The time when the export was requested.
+ :vartype start_date_time: ~datetime.datetime
+ :ivar container_name: The container name to which the labels will be exported.
+ :vartype container_name: str
+ :ivar snapshot_path: The output path where the labels will be exported.
+ :vartype snapshot_path: str
"""
+ _validation = {
+ "end_date_time": {"readonly": True},
+ "exported_row_count": {"readonly": True},
+ "format": {"required": True},
+ "labeling_job_id": {"readonly": True},
+ "start_date_time": {"readonly": True},
+ "container_name": {"readonly": True},
+ "snapshot_path": {"readonly": True},
+ }
+
_attribute_map = {
- "data_lake_store_account_name": {"key": "dataLakeStoreAccountName", "type": "str"},
+ "end_date_time": {"key": "endDateTime", "type": "iso-8601"},
+ "exported_row_count": {"key": "exportedRowCount", "type": "int"},
+ "format": {"key": "format", "type": "str"},
+ "labeling_job_id": {"key": "labelingJobId", "type": "str"},
+ "start_date_time": {"key": "startDateTime", "type": "iso-8601"},
+ "container_name": {"key": "containerName", "type": "str"},
+ "snapshot_path": {"key": "snapshotPath", "type": "str"},
}
- def __init__(self, *, data_lake_store_account_name: Optional[str] = None, **kwargs: Any) -> None:
- """
- :keyword data_lake_store_account_name: DataLake Store Account Name.
- :paramtype data_lake_store_account_name: str
- """
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
super().__init__(**kwargs)
- self.data_lake_store_account_name = data_lake_store_account_name
+ self.format: str = "CSV"
+ self.container_name = None
+ self.snapshot_path = None
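# --- Editor's note: illustrative sketch, not part of this diff. Most CsvExportSummary
# fields are read-only and only filled in when the model is deserialized from a service
# response; the constructor just pins the "format" discriminator.
from azure.mgmt.machinelearningservices import models

summary = models.CsvExportSummary()
print(summary.format)          # "CSV"
print(summary.container_name)  # None until returned by the service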
-class DataPathAssetReference(AssetReferenceBase):
- """Reference to an asset via its path in a datastore.
+class CustomForecastHorizon(ForecastHorizon):
+ """The desired maximum forecast horizon in units of time-series frequency.
All required parameters must be populated in order to send to Azure.
- :ivar reference_type: [Required] Specifies the type of asset reference. Required. Known values
- are: "Id", "DataPath", and "OutputPath".
- :vartype reference_type: str or ~azure.mgmt.machinelearningservices.models.ReferenceType
- :ivar datastore_id: ARM resource ID of the datastore where the asset is located.
- :vartype datastore_id: str
- :ivar path: The path of the file/directory in the datastore.
- :vartype path: str
+ :ivar mode: [Required] Set forecast horizon value selection mode. Required. Known values are:
+ "Auto" and "Custom".
+ :vartype mode: str or ~azure.mgmt.machinelearningservices.models.ForecastHorizonMode
+ :ivar value: [Required] Forecast horizon value. Required.
+ :vartype value: int
"""
_validation = {
- "reference_type": {"required": True},
+ "mode": {"required": True},
+ "value": {"required": True},
}
_attribute_map = {
- "reference_type": {"key": "referenceType", "type": "str"},
- "datastore_id": {"key": "datastoreId", "type": "str"},
- "path": {"key": "path", "type": "str"},
+ "mode": {"key": "mode", "type": "str"},
+ "value": {"key": "value", "type": "int"},
}
- def __init__(self, *, datastore_id: Optional[str] = None, path: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(self, *, value: int, **kwargs: Any) -> None:
"""
- :keyword datastore_id: ARM resource ID of the datastore where the asset is located.
- :paramtype datastore_id: str
- :keyword path: The path of the file/directory in the datastore.
- :paramtype path: str
+ :keyword value: [Required] Forecast horizon value. Required.
+ :paramtype value: int
"""
super().__init__(**kwargs)
- self.reference_type: str = "DataPath"
- self.datastore_id = datastore_id
- self.path = path
-
+ self.mode: str = "Custom"
+ self.value = value
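# --- Editor's note: illustrative sketch, not part of this diff. The constructor fixes the
# mode discriminator to "Custom", so only the horizon value needs to be supplied.
from azure.mgmt.machinelearningservices import models

horizon = models.CustomForecastHorizon(value=14)  # forecast 14 periods ahead
print(horizon.mode)                               # "Custom"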
-class Datastore(Resource):
- """Azure Resource Manager resource envelope.
- Variables are only populated by the server, and will be ignored when sending a request.
+class CustomInferencingServer(InferencingServer):
+ """Custom inference server configurations.
All required parameters must be populated in order to send to Azure.
- :ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
- :vartype id: str
- :ivar name: The name of the resource.
- :vartype name: str
- :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
- "Microsoft.Storage/storageAccounts".
- :vartype type: str
- :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
- information.
- :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
- :ivar properties: [Required] Additional attributes of the entity. Required.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.DatastoreProperties
+ :ivar server_type: [Required] Inferencing server type for various targets. Required. Known
+ values are: "AzureMLOnline", "AzureMLBatch", "Triton", and "Custom".
+ :vartype server_type: str or ~azure.mgmt.machinelearningservices.models.InferencingServerType
+ :ivar inference_configuration: Inference configuration for custom inferencing.
+ :vartype inference_configuration:
+ ~azure.mgmt.machinelearningservices.models.OnlineInferenceConfiguration
"""
_validation = {
- "id": {"readonly": True},
- "name": {"readonly": True},
- "type": {"readonly": True},
- "system_data": {"readonly": True},
- "properties": {"required": True},
- }
-
- _attribute_map = {
- "id": {"key": "id", "type": "str"},
- "name": {"key": "name", "type": "str"},
- "type": {"key": "type", "type": "str"},
- "system_data": {"key": "systemData", "type": "SystemData"},
- "properties": {"key": "properties", "type": "DatastoreProperties"},
+ "server_type": {"required": True},
}
- def __init__(self, *, properties: "_models.DatastoreProperties", **kwargs: Any) -> None:
- """
- :keyword properties: [Required] Additional attributes of the entity. Required.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.DatastoreProperties
- """
- super().__init__(**kwargs)
- self.properties = properties
-
-
-class DatastoreResourceArmPaginatedResult(_serialization.Model):
- """A paginated list of Datastore entities.
-
- :ivar next_link: The link to the next page of Datastore objects. If null, there are no
- additional pages.
- :vartype next_link: str
- :ivar value: An array of objects of type Datastore.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.Datastore]
- """
-
_attribute_map = {
- "next_link": {"key": "nextLink", "type": "str"},
- "value": {"key": "value", "type": "[Datastore]"},
+ "server_type": {"key": "serverType", "type": "str"},
+ "inference_configuration": {"key": "inferenceConfiguration", "type": "OnlineInferenceConfiguration"},
}
def __init__(
- self, *, next_link: Optional[str] = None, value: Optional[List["_models.Datastore"]] = None, **kwargs: Any
+ self, *, inference_configuration: Optional["_models.OnlineInferenceConfiguration"] = None, **kwargs: Any
) -> None:
"""
- :keyword next_link: The link to the next page of Datastore objects. If null, there are no
- additional pages.
- :paramtype next_link: str
- :keyword value: An array of objects of type Datastore.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.Datastore]
+ :keyword inference_configuration: Inference configuration for custom inferencing.
+ :paramtype inference_configuration:
+ ~azure.mgmt.machinelearningservices.models.OnlineInferenceConfiguration
"""
super().__init__(**kwargs)
- self.next_link = next_link
- self.value = value
-
+ self.server_type: str = "Custom"
+ self.inference_configuration = inference_configuration
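# --- Editor's note: illustrative sketch, not part of this diff. The serverType
# discriminator is fixed to "Custom"; inference_configuration is optional and its
# OnlineInferenceConfiguration shape is not shown in this diff, so it is left unset here.
from azure.mgmt.machinelearningservices import models

server = models.CustomInferencingServer()
print(server.server_type)  # "Custom"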
-class DataVersionBase(Resource):
- """Azure Resource Manager resource envelope.
-
- Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+class CustomKeys(_serialization.Model):
+ """Custom Keys credential object.
- :ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
- :vartype id: str
- :ivar name: The name of the resource.
- :vartype name: str
- :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
- "Microsoft.Storage/storageAccounts".
- :vartype type: str
- :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
- information.
- :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
- :ivar properties: [Required] Additional attributes of the entity. Required.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.DataVersionBaseProperties
+ :ivar keys: Dictionary of string key/value pairs.
+ :vartype keys: dict[str, str]
"""
- _validation = {
- "id": {"readonly": True},
- "name": {"readonly": True},
- "type": {"readonly": True},
- "system_data": {"readonly": True},
- "properties": {"required": True},
- }
-
_attribute_map = {
- "id": {"key": "id", "type": "str"},
- "name": {"key": "name", "type": "str"},
- "type": {"key": "type", "type": "str"},
- "system_data": {"key": "systemData", "type": "SystemData"},
- "properties": {"key": "properties", "type": "DataVersionBaseProperties"},
+ "keys": {"key": "keys", "type": "{str}"},
}
- def __init__(self, *, properties: "_models.DataVersionBaseProperties", **kwargs: Any) -> None:
+ def __init__(self, *, keys: Optional[Dict[str, str]] = None, **kwargs: Any) -> None:
"""
- :keyword properties: [Required] Additional attributes of the entity. Required.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.DataVersionBaseProperties
+ :keyword keys: Dictionary of string key/value pairs.
+ :paramtype keys: dict[str, str]
"""
super().__init__(**kwargs)
- self.properties = properties
+ self.keys = keys
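# --- Editor's note: illustrative sketch, not part of this diff. CustomKeys is a plain bag
# of string key/value credentials; it is typically attached as the `credentials` of the
# CustomKeysWorkspaceConnectionProperties model defined just below.
from azure.mgmt.machinelearningservices import models

credentials = models.CustomKeys(keys={"api-key": "<secret>", "endpoint": "https://example.invalid"})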
-class DataVersionBaseProperties(AssetBase):
- """Data version base definition.
+class CustomKeysWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2):
+ """Category:= CustomKeys
+ AuthType:= CustomKeys (as type discriminator)
+ Credentials:= {CustomKeys} as
+ Microsoft.MachineLearning.AccountRP.Contracts.WorkspaceConnection.CustomKeys
+ Target:= {any value}
+ Use Metadata property bag for ApiVersion and other metadata fields.
- You probably want to use the sub-classes and not this class directly. Known sub-classes are:
- MLTableData, UriFileDataVersion, UriFolderDataVersion
+ Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
- :ivar description: The asset description text.
- :vartype description: str
- :ivar properties: The asset property dictionary.
- :vartype properties: dict[str, str]
- :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
- :vartype tags: dict[str, str]
- :ivar is_anonymous: If the name version are system generated (anonymous registration).
- :vartype is_anonymous: bool
- :ivar is_archived: Is the asset archived?.
- :vartype is_archived: bool
- :ivar data_type: [Required] Specifies the type of data. Required. Known values are: "uri_file",
- "uri_folder", and "mltable".
- :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.DataType
- :ivar data_uri: [Required] Uri of the data. Example:
- https://go.microsoft.com/fwlink/?linkid=2202330. Required.
- :vartype data_uri: str
+ :ivar auth_type: Authentication type of the connection target. Required. Known values are:
+ "PAT", "ManagedIdentity", "UsernamePassword", "None", "SAS", "ServicePrincipal", "AccessKey",
+ "ApiKey", and "CustomKeys".
+ :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
+ :ivar category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI",
+ "CognitiveSearch", "CognitiveService", and "CustomKeys".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :ivar created_by_workspace_arm_id: The arm id of the workspace which created this connection.
+ :vartype created_by_workspace_arm_id: str
+ :ivar expiry_time:
+ :vartype expiry_time: ~datetime.datetime
+ :ivar is_shared_to_all: Whether this connection will be shared with all the project
+ workspaces under the hub.
+ :vartype is_shared_to_all: bool
+ :ivar metadata: Any object.
+ :vartype metadata: JSON
+ :ivar target:
+ :vartype target: str
+ :ivar credentials: Custom Keys credential object.
+ :vartype credentials: ~azure.mgmt.machinelearningservices.models.CustomKeys
"""
_validation = {
- "data_type": {"required": True},
- "data_uri": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "auth_type": {"required": True},
+ "created_by_workspace_arm_id": {"readonly": True},
}
_attribute_map = {
- "description": {"key": "description", "type": "str"},
- "properties": {"key": "properties", "type": "{str}"},
- "tags": {"key": "tags", "type": "{str}"},
- "is_anonymous": {"key": "isAnonymous", "type": "bool"},
- "is_archived": {"key": "isArchived", "type": "bool"},
- "data_type": {"key": "dataType", "type": "str"},
- "data_uri": {"key": "dataUri", "type": "str"},
- }
-
- _subtype_map = {
- "data_type": {"mltable": "MLTableData", "uri_file": "UriFileDataVersion", "uri_folder": "UriFolderDataVersion"}
+ "auth_type": {"key": "authType", "type": "str"},
+ "category": {"key": "category", "type": "str"},
+ "created_by_workspace_arm_id": {"key": "createdByWorkspaceArmId", "type": "str"},
+ "expiry_time": {"key": "expiryTime", "type": "iso-8601"},
+ "is_shared_to_all": {"key": "isSharedToAll", "type": "bool"},
+ "metadata": {"key": "metadata", "type": "object"},
+ "target": {"key": "target", "type": "str"},
+ "credentials": {"key": "credentials", "type": "CustomKeys"},
}
def __init__(
self,
*,
- data_uri: str,
- description: Optional[str] = None,
- properties: Optional[Dict[str, str]] = None,
- tags: Optional[Dict[str, str]] = None,
- is_anonymous: bool = False,
- is_archived: bool = False,
+ category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
+ expiry_time: Optional[datetime.datetime] = None,
+ is_shared_to_all: Optional[bool] = None,
+ metadata: Optional[JSON] = None,
+ target: Optional[str] = None,
+ credentials: Optional["_models.CustomKeys"] = None,
**kwargs: Any
) -> None:
"""
- :keyword description: The asset description text.
- :paramtype description: str
- :keyword properties: The asset property dictionary.
- :paramtype properties: dict[str, str]
- :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
- :paramtype tags: dict[str, str]
- :keyword is_anonymous: If the name version are system generated (anonymous registration).
- :paramtype is_anonymous: bool
- :keyword is_archived: Is the asset archived?.
- :paramtype is_archived: bool
- :keyword data_uri: [Required] Uri of the data. Example:
- https://go.microsoft.com/fwlink/?linkid=2202330. Required.
- :paramtype data_uri: str
+ :keyword category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI",
+ "CognitiveSearch", "CognitiveService", and "CustomKeys".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :keyword expiry_time:
+ :paramtype expiry_time: ~datetime.datetime
+ :keyword is_shared_to_all: Whether this connection will be shared to all the project workspaces
+ under the hub.
+ :paramtype is_shared_to_all: bool
+ :keyword metadata: Any object.
+ :paramtype metadata: JSON
+ :keyword target:
+ :paramtype target: str
+ :keyword credentials: Custom Keys credential object.
+ :paramtype credentials: ~azure.mgmt.machinelearningservices.models.CustomKeys
"""
super().__init__(
- description=description,
- properties=properties,
- tags=tags,
- is_anonymous=is_anonymous,
- is_archived=is_archived,
+ category=category,
+ expiry_time=expiry_time,
+ is_shared_to_all=is_shared_to_all,
+ metadata=metadata,
+ target=target,
**kwargs
)
- self.data_type: Optional[str] = None
- self.data_uri = data_uri
+ self.auth_type: str = "CustomKeys"
+ self.credentials = credentials
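# Illustrative sketch: constructing a CustomKeys-based workspace connection payload with the
# models defined above. The target URL and key values are placeholders; auth_type is fixed to
# "CustomKeys" by the subclass constructor.
from azure.mgmt.machinelearningservices import models

connection_properties = models.CustomKeysWorkspaceConnectionProperties(
    category="CustomKeys",
    target="https://contoso.example.com/api",
    credentials=models.CustomKeys(keys={"api-key": "<placeholder>"}),
    metadata={"ApiVersion": "2023-08-01-preview"},
)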
-class DataVersionBaseResourceArmPaginatedResult(_serialization.Model):
- """A paginated list of DataVersionBase entities.
+class CustomMetricThreshold(_serialization.Model):
+ """CustomMetricThreshold.
- :ivar next_link: The link to the next page of DataVersionBase objects. If null, there are no
- additional pages.
- :vartype next_link: str
- :ivar value: An array of objects of type DataVersionBase.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.DataVersionBase]
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar metric: [Required] The user-defined metric to calculate. Required.
+ :vartype metric: str
+ :ivar threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
"""
+ _validation = {
+ "metric": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
_attribute_map = {
- "next_link": {"key": "nextLink", "type": "str"},
- "value": {"key": "value", "type": "[DataVersionBase]"},
+ "metric": {"key": "metric", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
}
def __init__(
- self, *, next_link: Optional[str] = None, value: Optional[List["_models.DataVersionBase"]] = None, **kwargs: Any
+ self, *, metric: str, threshold: Optional["_models.MonitoringThreshold"] = None, **kwargs: Any
) -> None:
"""
- :keyword next_link: The link to the next page of DataVersionBase objects. If null, there are no
- additional pages.
- :paramtype next_link: str
- :keyword value: An array of objects of type DataVersionBase.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.DataVersionBase]
+ :keyword metric: [Required] The user-defined metric to calculate. Required.
+ :paramtype metric: str
+ :keyword threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
"""
super().__init__(**kwargs)
- self.next_link = next_link
- self.value = value
+ self.metric = metric
+ self.threshold = threshold
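# Illustrative sketch: a custom metric threshold. ``threshold`` is optional and its
# MonitoringThreshold constructor is not defined in this hunk, so it is left unset and the
# service default for the metric applies.
from azure.mgmt.machinelearningservices import models

metric_threshold = models.CustomMetricThreshold(metric="my_custom_accuracy")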
-class OnlineScaleSettings(_serialization.Model):
- """Online deployment scaling configuration.
+class JobInput(_serialization.Model):
+ """Command job definition.
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
- DefaultScaleSettings, TargetUtilizationScaleSettings
+ CustomModelJobInput, LiteralJobInput, MLFlowModelJobInput, MLTableJobInput,
+ TritonModelJobInput, UriFileJobInput, UriFolderJobInput
All required parameters must be populated in order to send to Azure.
- :ivar scale_type: [Required] Type of deployment scaling algorithm. Required. Known values are:
- "Default" and "TargetUtilization".
- :vartype scale_type: str or ~azure.mgmt.machinelearningservices.models.ScaleType
+ :ivar description: Description for the input.
+ :vartype description: str
+ :ivar job_input_type: [Required] Specifies the type of job input. Required. Known values are:
+ "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
+ "triton_model".
+ :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
"""
_validation = {
- "scale_type": {"required": True},
+ "job_input_type": {"required": True},
}
_attribute_map = {
- "scale_type": {"key": "scaleType", "type": "str"},
+ "description": {"key": "description", "type": "str"},
+ "job_input_type": {"key": "jobInputType", "type": "str"},
}
_subtype_map = {
- "scale_type": {"Default": "DefaultScaleSettings", "TargetUtilization": "TargetUtilizationScaleSettings"}
- }
-
- def __init__(self, **kwargs: Any) -> None:
- """ """
- super().__init__(**kwargs)
- self.scale_type: Optional[str] = None
+ "job_input_type": {
+ "custom_model": "CustomModelJobInput",
+ "literal": "LiteralJobInput",
+ "mlflow_model": "MLFlowModelJobInput",
+ "mltable": "MLTableJobInput",
+ "triton_model": "TritonModelJobInput",
+ "uri_file": "UriFileJobInput",
+ "uri_folder": "UriFolderJobInput",
+ }
+ }
+
+ def __init__(self, *, description: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword description: Description for the input.
+ :paramtype description: str
+ """
+ super().__init__(**kwargs)
+ self.description = description
+ self.job_input_type: Optional[str] = None
-class DefaultScaleSettings(OnlineScaleSettings):
- """DefaultScaleSettings.
+class CustomModelJobInput(AssetJobInput, JobInput):
+ """CustomModelJobInput.
All required parameters must be populated in order to send to Azure.
- :ivar scale_type: [Required] Type of deployment scaling algorithm. Required. Known values are:
- "Default" and "TargetUtilization".
- :vartype scale_type: str or ~azure.mgmt.machinelearningservices.models.ScaleType
+ :ivar description: Description for the input.
+ :vartype description: str
+ :ivar job_input_type: [Required] Specifies the type of job input. Required. Known values are:
+ "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
+ "triton_model".
+ :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
+ :ivar mode: Input Asset Delivery Mode. Known values are: "ReadOnlyMount", "ReadWriteMount",
+ "Download", "Direct", "EvalMount", and "EvalDownload".
+ :vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
+ :ivar uri: [Required] Input Asset URI. Required.
+ :vartype uri: str
"""
_validation = {
- "scale_type": {"required": True},
+ "job_input_type": {"required": True},
+ "uri": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
}
_attribute_map = {
- "scale_type": {"key": "scaleType", "type": "str"},
+ "description": {"key": "description", "type": "str"},
+ "job_input_type": {"key": "jobInputType", "type": "str"},
+ "mode": {"key": "mode", "type": "str"},
+ "uri": {"key": "uri", "type": "str"},
}
- def __init__(self, **kwargs: Any) -> None:
- """ """
- super().__init__(**kwargs)
- self.scale_type: str = "Default"
+ def __init__(
+ self,
+ *,
+ uri: str,
+ description: Optional[str] = None,
+ mode: Optional[Union[str, "_models.InputDeliveryMode"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword description: Description for the input.
+ :paramtype description: str
+ :keyword mode: Input Asset Delivery Mode. Known values are: "ReadOnlyMount", "ReadWriteMount",
+ "Download", "Direct", "EvalMount", and "EvalDownload".
+ :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
+ :keyword uri: [Required] Input Asset URI. Required.
+ :paramtype uri: str
+ """
+ super().__init__(mode=mode, uri=uri, description=description, **kwargs)
+ self.description = description
+ self.job_input_type: str = "custom_model"
+ self.mode = mode
+ self.uri = uri
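# Illustrative sketch: a custom-model job input. The asset URI is a placeholder; ``uri`` is the
# only required argument, and ``mode`` uses one of the InputDeliveryMode values listed above.
from azure.mgmt.machinelearningservices import models

job_input = models.CustomModelJobInput(
    uri="azureml://registries/<registry>/models/<model>/versions/1",
    mode="ReadOnlyMount",
    description="Model consumed by the scoring step",
)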
-class DeploymentLogs(_serialization.Model):
- """DeploymentLogs.
+class JobOutput(_serialization.Model):
+ """Job output definition container information on where to find job output/logs.
- :ivar content: The retrieved online deployment logs.
- :vartype content: str
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ CustomModelJobOutput, MLFlowModelJobOutput, MLTableJobOutput, TritonModelJobOutput,
+ UriFileJobOutput, UriFolderJobOutput
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar description: Description for the output.
+ :vartype description: str
+ :ivar job_output_type: [Required] Specifies the type of job output. Required. Known values are:
+ "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and "triton_model".
+ :vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
"""
+ _validation = {
+ "job_output_type": {"required": True},
+ }
+
_attribute_map = {
- "content": {"key": "content", "type": "str"},
+ "description": {"key": "description", "type": "str"},
+ "job_output_type": {"key": "jobOutputType", "type": "str"},
}
- def __init__(self, *, content: Optional[str] = None, **kwargs: Any) -> None:
+ _subtype_map = {
+ "job_output_type": {
+ "custom_model": "CustomModelJobOutput",
+ "mlflow_model": "MLFlowModelJobOutput",
+ "mltable": "MLTableJobOutput",
+ "triton_model": "TritonModelJobOutput",
+ "uri_file": "UriFileJobOutput",
+ "uri_folder": "UriFolderJobOutput",
+ }
+ }
+
+ def __init__(self, *, description: Optional[str] = None, **kwargs: Any) -> None:
"""
- :keyword content: The retrieved online deployment logs.
- :paramtype content: str
+ :keyword description: Description for the output.
+ :paramtype description: str
"""
super().__init__(**kwargs)
- self.content = content
+ self.description = description
+ self.job_output_type: Optional[str] = None
-class DeploymentLogsRequest(_serialization.Model):
- """DeploymentLogsRequest.
+class CustomModelJobOutput(AssetJobOutput, JobOutput):
+ """CustomModelJobOutput.
- :ivar container_type: The type of container to retrieve logs from. Known values are:
- "StorageInitializer" and "InferenceServer".
- :vartype container_type: str or ~azure.mgmt.machinelearningservices.models.ContainerType
- :ivar tail: The maximum number of lines to tail.
- :vartype tail: int
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar description: Description for the output.
+ :vartype description: str
+ :ivar job_output_type: [Required] Specifies the type of job output. Required. Known values are:
+ "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and "triton_model".
+ :vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
+ :ivar asset_name: Output Asset Name.
+ :vartype asset_name: str
+ :ivar asset_version: Output Asset Version.
+ :vartype asset_version: str
+ :ivar auto_delete_setting: Auto delete setting of output data asset.
+ :vartype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
+ :vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
+ :ivar uri: Output Asset URI.
+ :vartype uri: str
"""
+ _validation = {
+ "job_output_type": {"required": True},
+ }
+
_attribute_map = {
- "container_type": {"key": "containerType", "type": "str"},
- "tail": {"key": "tail", "type": "int"},
+ "description": {"key": "description", "type": "str"},
+ "job_output_type": {"key": "jobOutputType", "type": "str"},
+ "asset_name": {"key": "assetName", "type": "str"},
+ "asset_version": {"key": "assetVersion", "type": "str"},
+ "auto_delete_setting": {"key": "autoDeleteSetting", "type": "AutoDeleteSetting"},
+ "mode": {"key": "mode", "type": "str"},
+ "uri": {"key": "uri", "type": "str"},
}
def __init__(
self,
*,
- container_type: Optional[Union[str, "_models.ContainerType"]] = None,
- tail: Optional[int] = None,
+ description: Optional[str] = None,
+ asset_name: Optional[str] = None,
+ asset_version: Optional[str] = None,
+ auto_delete_setting: Optional["_models.AutoDeleteSetting"] = None,
+ mode: Optional[Union[str, "_models.OutputDeliveryMode"]] = None,
+ uri: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword container_type: The type of container to retrieve logs from. Known values are:
- "StorageInitializer" and "InferenceServer".
- :paramtype container_type: str or ~azure.mgmt.machinelearningservices.models.ContainerType
- :keyword tail: The maximum number of lines to tail.
- :paramtype tail: int
+ :keyword description: Description for the output.
+ :paramtype description: str
+ :keyword asset_name: Output Asset Name.
+ :paramtype asset_name: str
+ :keyword asset_version: Output Asset Version.
+ :paramtype asset_version: str
+ :keyword auto_delete_setting: Auto delete setting of output data asset.
+ :paramtype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
+ :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
+ :keyword uri: Output Asset URI.
+ :paramtype uri: str
"""
- super().__init__(**kwargs)
- self.container_type = container_type
- self.tail = tail
+ super().__init__(
+ asset_name=asset_name,
+ asset_version=asset_version,
+ auto_delete_setting=auto_delete_setting,
+ mode=mode,
+ uri=uri,
+ description=description,
+ **kwargs
+ )
+ self.description = description
+ self.job_output_type: str = "custom_model"
+ self.asset_name = asset_name
+ self.asset_version = asset_version
+ self.auto_delete_setting = auto_delete_setting
+ self.mode = mode
+ self.uri = uri
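# Illustrative sketch: a custom-model job output. All arguments are optional; the asset
# name/version are placeholders and ``mode`` uses one of the OutputDeliveryMode values above.
from azure.mgmt.machinelearningservices import models

job_output = models.CustomModelJobOutput(
    asset_name="my-trained-model",
    asset_version="1",
    mode="ReadWriteMount",
    description="Serialized model produced by training",
)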
-class ResourceConfiguration(_serialization.Model):
- """ResourceConfiguration.
+class MonitoringSignalBase(_serialization.Model):
+ """MonitoringSignalBase.
- :ivar instance_count: Optional number of instances or nodes used by the compute target.
- :vartype instance_count: int
- :ivar instance_type: Optional type of VM used as supported by the compute target.
- :vartype instance_type: str
- :ivar properties: Additional properties bag.
- :vartype properties: dict[str, JSON]
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ CustomMonitoringSignal, DataDriftMonitoringSignal, DataQualityMonitoringSignal,
+ FeatureAttributionDriftMonitoringSignal, GenerationSafetyQualityMonitoringSignal,
+ GenerationTokenUsageSignal, ModelPerformanceSignal, PredictionDriftMonitoringSignal
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar notification_types: The current notification mode for this signal.
+ :vartype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
+ :vartype properties: dict[str, str]
+ :ivar signal_type: [Required] Specifies the type of signal to monitor. Required. Known values
+ are: "DataDrift", "PredictionDrift", "DataQuality", "FeatureAttributionDrift", "Custom",
+ "ModelPerformance", "GenerationSafetyQuality", and "GenerationTokenStatistics".
+ :vartype signal_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringSignalType
"""
+ _validation = {
+ "signal_type": {"required": True},
+ }
+
_attribute_map = {
- "instance_count": {"key": "instanceCount", "type": "int"},
- "instance_type": {"key": "instanceType", "type": "str"},
- "properties": {"key": "properties", "type": "{object}"},
+ "notification_types": {"key": "notificationTypes", "type": "[str]"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "signal_type": {"key": "signalType", "type": "str"},
+ }
+
+ _subtype_map = {
+ "signal_type": {
+ "Custom": "CustomMonitoringSignal",
+ "DataDrift": "DataDriftMonitoringSignal",
+ "DataQuality": "DataQualityMonitoringSignal",
+ "FeatureAttributionDrift": "FeatureAttributionDriftMonitoringSignal",
+ "GenerationSafetyQuality": "GenerationSafetyQualityMonitoringSignal",
+ "GenerationTokenStatistics": "GenerationTokenUsageSignal",
+ "ModelPerformance": "ModelPerformanceSignal",
+ "PredictionDrift": "PredictionDriftMonitoringSignal",
+ }
}
def __init__(
self,
*,
- instance_count: int = 1,
- instance_type: Optional[str] = None,
- properties: Optional[Dict[str, JSON]] = None,
+ notification_types: Optional[List[Union[str, "_models.MonitoringNotificationType"]]] = None,
+ properties: Optional[Dict[str, str]] = None,
**kwargs: Any
) -> None:
"""
- :keyword instance_count: Optional number of instances or nodes used by the compute target.
- :paramtype instance_count: int
- :keyword instance_type: Optional type of VM used as supported by the compute target.
- :paramtype instance_type: str
- :keyword properties: Additional properties bag.
- :paramtype properties: dict[str, JSON]
+ :keyword notification_types: The current notification mode for this signal.
+ :paramtype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
+ :paramtype properties: dict[str, str]
"""
super().__init__(**kwargs)
- self.instance_count = instance_count
- self.instance_type = instance_type
+ self.notification_types = notification_types
self.properties = properties
+ self.signal_type: Optional[str] = None
-class DeploymentResourceConfiguration(ResourceConfiguration):
- """DeploymentResourceConfiguration.
+class CustomMonitoringSignal(MonitoringSignalBase):
+ """CustomMonitoringSignal.
- :ivar instance_count: Optional number of instances or nodes used by the compute target.
- :vartype instance_count: int
- :ivar instance_type: Optional type of VM used as supported by the compute target.
- :vartype instance_type: str
- :ivar properties: Additional properties bag.
- :vartype properties: dict[str, JSON]
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar notification_types: The current notification mode for this signal.
+ :vartype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
+ :vartype properties: dict[str, str]
+ :ivar signal_type: [Required] Specifies the type of signal to monitor. Required. Known values
+ are: "DataDrift", "PredictionDrift", "DataQuality", "FeatureAttributionDrift", "Custom",
+ "ModelPerformance", "GenerationSafetyQuality", and "GenerationTokenStatistics".
+ :vartype signal_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringSignalType
+ :ivar component_id: [Required] ARM resource ID of the component resource used to calculate the
+ custom metrics. Required.
+ :vartype component_id: str
+ :ivar input_assets: Monitoring assets to take as input. Key is the component input port name,
+ value is the data asset.
+ :vartype input_assets: dict[str,
+ ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase]
+ :ivar inputs: Extra component parameters to take as input. Key is the component literal input
+ port name, value is the parameter value.
+ :vartype inputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
+ :ivar metric_thresholds: [Required] A list of metrics to calculate and their associated
+ thresholds. Required.
+ :vartype metric_thresholds:
+ list[~azure.mgmt.machinelearningservices.models.CustomMetricThreshold]
+ :ivar workspace_connection: [Required] The workspace connection to use for this monitoring
+ signal. Required.
+ :vartype workspace_connection:
+ ~azure.mgmt.machinelearningservices.models.MonitoringWorkspaceConnection
"""
+ _validation = {
+ "signal_type": {"required": True},
+ "component_id": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "metric_thresholds": {"required": True},
+ "workspace_connection": {"required": True},
+ }
+
_attribute_map = {
- "instance_count": {"key": "instanceCount", "type": "int"},
- "instance_type": {"key": "instanceType", "type": "str"},
- "properties": {"key": "properties", "type": "{object}"},
+ "notification_types": {"key": "notificationTypes", "type": "[str]"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "signal_type": {"key": "signalType", "type": "str"},
+ "component_id": {"key": "componentId", "type": "str"},
+ "input_assets": {"key": "inputAssets", "type": "{MonitoringInputDataBase}"},
+ "inputs": {"key": "inputs", "type": "{JobInput}"},
+ "metric_thresholds": {"key": "metricThresholds", "type": "[CustomMetricThreshold]"},
+ "workspace_connection": {"key": "workspaceConnection", "type": "MonitoringWorkspaceConnection"},
}
def __init__(
self,
*,
- instance_count: int = 1,
- instance_type: Optional[str] = None,
- properties: Optional[Dict[str, JSON]] = None,
+ component_id: str,
+ metric_thresholds: List["_models.CustomMetricThreshold"],
+ workspace_connection: "_models.MonitoringWorkspaceConnection",
+ notification_types: Optional[List[Union[str, "_models.MonitoringNotificationType"]]] = None,
+ properties: Optional[Dict[str, str]] = None,
+ input_assets: Optional[Dict[str, "_models.MonitoringInputDataBase"]] = None,
+ inputs: Optional[Dict[str, "_models.JobInput"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword instance_count: Optional number of instances or nodes used by the compute target.
- :paramtype instance_count: int
- :keyword instance_type: Optional type of VM used as supported by the compute target.
- :paramtype instance_type: str
- :keyword properties: Additional properties bag.
- :paramtype properties: dict[str, JSON]
- """
- super().__init__(instance_count=instance_count, instance_type=instance_type, properties=properties, **kwargs)
+ :keyword notification_types: The current notification mode for this signal.
+ :paramtype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
+ :paramtype properties: dict[str, str]
+ :keyword component_id: [Required] ARM resource ID of the component resource used to calculate
+ the custom metrics. Required.
+ :paramtype component_id: str
+ :keyword input_assets: Monitoring assets to take as input. Key is the component input port
+ name, value is the data asset.
+ :paramtype input_assets: dict[str,
+ ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase]
+ :keyword inputs: Extra component parameters to take as input. Key is the component literal
+ input port name, value is the parameter value.
+ :paramtype inputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
+ :keyword metric_thresholds: [Required] A list of metrics to calculate and their associated
+ thresholds. Required.
+ :paramtype metric_thresholds:
+ list[~azure.mgmt.machinelearningservices.models.CustomMetricThreshold]
+ :keyword workspace_connection: [Required] The workspace connection to use for this monitoring
+ signal. Required.
+ :paramtype workspace_connection:
+ ~azure.mgmt.machinelearningservices.models.MonitoringWorkspaceConnection
+ """
+ super().__init__(notification_types=notification_types, properties=properties, **kwargs)
+ self.signal_type: str = "Custom"
+ self.component_id = component_id
+ self.input_assets = input_assets
+ self.inputs = inputs
+ self.metric_thresholds = metric_thresholds
+ self.workspace_connection = workspace_connection
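# Illustrative sketch: a custom monitoring signal. The component ARM ID is a placeholder, and
# the no-argument MonitoringWorkspaceConnection() call is an assumption since that model's
# constructor is not defined in this hunk.
from azure.mgmt.machinelearningservices import models

custom_signal = models.CustomMonitoringSignal(
    component_id=(
        "/subscriptions/<sub>/resourceGroups/<rg>/providers/"
        "Microsoft.MachineLearningServices/workspaces/<ws>/components/<component>"
    ),
    metric_thresholds=[models.CustomMetricThreshold(metric="my_custom_accuracy")],
    workspace_connection=models.MonitoringWorkspaceConnection(),
)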
-class DiagnoseRequestProperties(_serialization.Model):
- """DiagnoseRequestProperties.
+class CustomNCrossValidations(NCrossValidations):
+ """N-Cross validations are specified by user.
- :ivar udr: Setting for diagnosing user defined routing.
- :vartype udr: dict[str, JSON]
- :ivar nsg: Setting for diagnosing network security group.
- :vartype nsg: dict[str, JSON]
- :ivar resource_lock: Setting for diagnosing resource lock.
- :vartype resource_lock: dict[str, JSON]
- :ivar dns_resolution: Setting for diagnosing dns resolution.
- :vartype dns_resolution: dict[str, JSON]
- :ivar storage_account: Setting for diagnosing dependent storage account.
- :vartype storage_account: dict[str, JSON]
- :ivar key_vault: Setting for diagnosing dependent key vault.
- :vartype key_vault: dict[str, JSON]
- :ivar container_registry: Setting for diagnosing dependent container registry.
- :vartype container_registry: dict[str, JSON]
- :ivar application_insights: Setting for diagnosing dependent application insights.
- :vartype application_insights: dict[str, JSON]
- :ivar others: Setting for diagnosing unclassified category of problems.
- :vartype others: dict[str, JSON]
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar mode: [Required] Mode for determining N-Cross validations. Required. Known values are:
+ "Auto" and "Custom".
+ :vartype mode: str or ~azure.mgmt.machinelearningservices.models.NCrossValidationsMode
+ :ivar value: [Required] N-Cross validations value. Required.
+ :vartype value: int
"""
+ _validation = {
+ "mode": {"required": True},
+ "value": {"required": True},
+ }
+
_attribute_map = {
- "udr": {"key": "udr", "type": "{object}"},
- "nsg": {"key": "nsg", "type": "{object}"},
- "resource_lock": {"key": "resourceLock", "type": "{object}"},
- "dns_resolution": {"key": "dnsResolution", "type": "{object}"},
- "storage_account": {"key": "storageAccount", "type": "{object}"},
- "key_vault": {"key": "keyVault", "type": "{object}"},
- "container_registry": {"key": "containerRegistry", "type": "{object}"},
- "application_insights": {"key": "applicationInsights", "type": "{object}"},
- "others": {"key": "others", "type": "{object}"},
+ "mode": {"key": "mode", "type": "str"},
+ "value": {"key": "value", "type": "int"},
}
- def __init__(
- self,
- *,
- udr: Optional[Dict[str, JSON]] = None,
- nsg: Optional[Dict[str, JSON]] = None,
- resource_lock: Optional[Dict[str, JSON]] = None,
- dns_resolution: Optional[Dict[str, JSON]] = None,
- storage_account: Optional[Dict[str, JSON]] = None,
- key_vault: Optional[Dict[str, JSON]] = None,
- container_registry: Optional[Dict[str, JSON]] = None,
- application_insights: Optional[Dict[str, JSON]] = None,
- others: Optional[Dict[str, JSON]] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, value: int, **kwargs: Any) -> None:
"""
- :keyword udr: Setting for diagnosing user defined routing.
- :paramtype udr: dict[str, JSON]
- :keyword nsg: Setting for diagnosing network security group.
- :paramtype nsg: dict[str, JSON]
- :keyword resource_lock: Setting for diagnosing resource lock.
- :paramtype resource_lock: dict[str, JSON]
- :keyword dns_resolution: Setting for diagnosing dns resolution.
- :paramtype dns_resolution: dict[str, JSON]
- :keyword storage_account: Setting for diagnosing dependent storage account.
- :paramtype storage_account: dict[str, JSON]
- :keyword key_vault: Setting for diagnosing dependent key vault.
- :paramtype key_vault: dict[str, JSON]
- :keyword container_registry: Setting for diagnosing dependent container registry.
- :paramtype container_registry: dict[str, JSON]
- :keyword application_insights: Setting for diagnosing dependent application insights.
- :paramtype application_insights: dict[str, JSON]
- :keyword others: Setting for diagnosing unclassified category of problems.
- :paramtype others: dict[str, JSON]
+ :keyword value: [Required] N-Cross validations value. Required.
+ :paramtype value: int
"""
super().__init__(**kwargs)
- self.udr = udr
- self.nsg = nsg
- self.resource_lock = resource_lock
- self.dns_resolution = dns_resolution
- self.storage_account = storage_account
- self.key_vault = key_vault
- self.container_registry = container_registry
- self.application_insights = application_insights
- self.others = others
+ self.mode: str = "Custom"
+ self.value = value
-class DiagnoseResponseResult(_serialization.Model):
- """DiagnoseResponseResult.
+class CustomSeasonality(Seasonality):
+ """CustomSeasonality.
- :ivar value:
- :vartype value: ~azure.mgmt.machinelearningservices.models.DiagnoseResponseResultValue
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar mode: [Required] Seasonality mode. Required. Known values are: "Auto" and "Custom".
+ :vartype mode: str or ~azure.mgmt.machinelearningservices.models.SeasonalityMode
+ :ivar value: [Required] Seasonality value. Required.
+ :vartype value: int
"""
+ _validation = {
+ "mode": {"required": True},
+ "value": {"required": True},
+ }
+
_attribute_map = {
- "value": {"key": "value", "type": "DiagnoseResponseResultValue"},
+ "mode": {"key": "mode", "type": "str"},
+ "value": {"key": "value", "type": "int"},
}
- def __init__(self, *, value: Optional["_models.DiagnoseResponseResultValue"] = None, **kwargs: Any) -> None:
+ def __init__(self, *, value: int, **kwargs: Any) -> None:
"""
- :keyword value:
- :paramtype value: ~azure.mgmt.machinelearningservices.models.DiagnoseResponseResultValue
+ :keyword value: [Required] Seasonality value. Required.
+ :paramtype value: int
"""
super().__init__(**kwargs)
+ self.mode: str = "Custom"
self.value = value
-class DiagnoseResponseResultValue(_serialization.Model):
- """DiagnoseResponseResultValue.
+class CustomService(_serialization.Model):
+ """Specifies the custom service configuration.
- :ivar user_defined_route_results:
- :vartype user_defined_route_results:
- list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
- :ivar network_security_rule_results:
- :vartype network_security_rule_results:
- list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
- :ivar resource_lock_results:
- :vartype resource_lock_results: list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
- :ivar dns_resolution_results:
- :vartype dns_resolution_results:
- list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
- :ivar storage_account_results:
- :vartype storage_account_results:
- list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
- :ivar key_vault_results:
- :vartype key_vault_results: list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
- :ivar container_registry_results:
- :vartype container_registry_results:
- list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
- :ivar application_insights_results:
- :vartype application_insights_results:
- list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
- :ivar other_results:
- :vartype other_results: list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
+ :ivar additional_properties: Unmatched properties from the message are deserialized to this
+ collection.
+ :vartype additional_properties: dict[str, any]
+ :ivar name: Name of the Custom Service.
+ :vartype name: str
+ :ivar image: Describes the Image Specifications.
+ :vartype image: ~azure.mgmt.machinelearningservices.models.Image
+ :ivar environment_variables: Environment Variable for the container.
+ :vartype environment_variables: dict[str,
+ ~azure.mgmt.machinelearningservices.models.EnvironmentVariable]
+ :ivar docker: Describes the docker settings for the image.
+ :vartype docker: ~azure.mgmt.machinelearningservices.models.Docker
+ :ivar endpoints: Configuring the endpoints for the container.
+ :vartype endpoints: list[~azure.mgmt.machinelearningservices.models.Endpoint]
+ :ivar volumes: Configuring the volumes for the container.
+ :vartype volumes: list[~azure.mgmt.machinelearningservices.models.VolumeDefinition]
"""
_attribute_map = {
- "user_defined_route_results": {"key": "userDefinedRouteResults", "type": "[DiagnoseResult]"},
- "network_security_rule_results": {"key": "networkSecurityRuleResults", "type": "[DiagnoseResult]"},
- "resource_lock_results": {"key": "resourceLockResults", "type": "[DiagnoseResult]"},
- "dns_resolution_results": {"key": "dnsResolutionResults", "type": "[DiagnoseResult]"},
- "storage_account_results": {"key": "storageAccountResults", "type": "[DiagnoseResult]"},
- "key_vault_results": {"key": "keyVaultResults", "type": "[DiagnoseResult]"},
- "container_registry_results": {"key": "containerRegistryResults", "type": "[DiagnoseResult]"},
- "application_insights_results": {"key": "applicationInsightsResults", "type": "[DiagnoseResult]"},
- "other_results": {"key": "otherResults", "type": "[DiagnoseResult]"},
+ "additional_properties": {"key": "", "type": "{object}"},
+ "name": {"key": "name", "type": "str"},
+ "image": {"key": "image", "type": "Image"},
+ "environment_variables": {"key": "environmentVariables", "type": "{EnvironmentVariable}"},
+ "docker": {"key": "docker", "type": "Docker"},
+ "endpoints": {"key": "endpoints", "type": "[Endpoint]"},
+ "volumes": {"key": "volumes", "type": "[VolumeDefinition]"},
}
def __init__(
self,
*,
- user_defined_route_results: Optional[List["_models.DiagnoseResult"]] = None,
- network_security_rule_results: Optional[List["_models.DiagnoseResult"]] = None,
- resource_lock_results: Optional[List["_models.DiagnoseResult"]] = None,
- dns_resolution_results: Optional[List["_models.DiagnoseResult"]] = None,
- storage_account_results: Optional[List["_models.DiagnoseResult"]] = None,
- key_vault_results: Optional[List["_models.DiagnoseResult"]] = None,
- container_registry_results: Optional[List["_models.DiagnoseResult"]] = None,
- application_insights_results: Optional[List["_models.DiagnoseResult"]] = None,
- other_results: Optional[List["_models.DiagnoseResult"]] = None,
+ additional_properties: Optional[Dict[str, Any]] = None,
+ name: Optional[str] = None,
+ image: Optional["_models.Image"] = None,
+ environment_variables: Optional[Dict[str, "_models.EnvironmentVariable"]] = None,
+ docker: Optional["_models.Docker"] = None,
+ endpoints: Optional[List["_models.Endpoint"]] = None,
+ volumes: Optional[List["_models.VolumeDefinition"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword user_defined_route_results:
- :paramtype user_defined_route_results:
- list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
- :keyword network_security_rule_results:
- :paramtype network_security_rule_results:
- list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
- :keyword resource_lock_results:
- :paramtype resource_lock_results:
- list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
- :keyword dns_resolution_results:
- :paramtype dns_resolution_results:
- list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
- :keyword storage_account_results:
- :paramtype storage_account_results:
- list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
- :keyword key_vault_results:
- :paramtype key_vault_results: list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
- :keyword container_registry_results:
- :paramtype container_registry_results:
- list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
- :keyword application_insights_results:
- :paramtype application_insights_results:
- list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
- :keyword other_results:
- :paramtype other_results: list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
+ :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ collection.
+ :paramtype additional_properties: dict[str, any]
+ :keyword name: Name of the Custom Service.
+ :paramtype name: str
+ :keyword image: Describes the Image Specifications.
+ :paramtype image: ~azure.mgmt.machinelearningservices.models.Image
+ :keyword environment_variables: Environment Variable for the container.
+ :paramtype environment_variables: dict[str,
+ ~azure.mgmt.machinelearningservices.models.EnvironmentVariable]
+ :keyword docker: Describes the docker settings for the image.
+ :paramtype docker: ~azure.mgmt.machinelearningservices.models.Docker
+ :keyword endpoints: Configuring the endpoints for the container.
+ :paramtype endpoints: list[~azure.mgmt.machinelearningservices.models.Endpoint]
+ :keyword volumes: Configuring the volumes for the container.
+ :paramtype volumes: list[~azure.mgmt.machinelearningservices.models.VolumeDefinition]
"""
super().__init__(**kwargs)
- self.user_defined_route_results = user_defined_route_results
- self.network_security_rule_results = network_security_rule_results
- self.resource_lock_results = resource_lock_results
- self.dns_resolution_results = dns_resolution_results
- self.storage_account_results = storage_account_results
- self.key_vault_results = key_vault_results
- self.container_registry_results = container_registry_results
- self.application_insights_results = application_insights_results
- self.other_results = other_results
+ self.additional_properties = additional_properties
+ self.name = name
+ self.image = image
+ self.environment_variables = environment_variables
+ self.docker = docker
+ self.endpoints = endpoints
+ self.volumes = volumes
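# Illustrative sketch: a custom service definition. The Image, Docker, Endpoint and
# EnvironmentVariable constructors are not defined in this hunk, so the keyword names used for
# them (reference, privileged, name/target, value) are assumptions; the image and port are
# placeholders.
from azure.mgmt.machinelearningservices import models

service = models.CustomService(
    name="tensorboard",
    image=models.Image(reference="mcr.microsoft.com/azureml/tensorboard:latest"),
    docker=models.Docker(privileged=False),
    endpoints=[models.Endpoint(name="tb", target=6006)],
    environment_variables={"LOG_DIR": models.EnvironmentVariable(value="/logs")},
)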
-class DiagnoseResult(_serialization.Model):
- """Result of Diagnose.
+class CustomTargetLags(TargetLags):
+ """CustomTargetLags.
- Variables are only populated by the server, and will be ignored when sending a request.
+ All required parameters must be populated in order to send to Azure.
- :ivar code: Code for workspace setup error.
- :vartype code: str
- :ivar level: Level of workspace setup error. Known values are: "Warning", "Error", and
- "Information".
- :vartype level: str or ~azure.mgmt.machinelearningservices.models.DiagnoseResultLevel
- :ivar message: Message of workspace setup error.
- :vartype message: str
+ :ivar mode: [Required] Set target lags mode - Auto/Custom. Required. Known values are: "Auto"
+ and "Custom".
+ :vartype mode: str or ~azure.mgmt.machinelearningservices.models.TargetLagsMode
+ :ivar values: [Required] Set target lags values. Required.
+ :vartype values: list[int]
"""
_validation = {
- "code": {"readonly": True},
- "level": {"readonly": True},
- "message": {"readonly": True},
+ "mode": {"required": True},
+ "values": {"required": True},
}
_attribute_map = {
- "code": {"key": "code", "type": "str"},
- "level": {"key": "level", "type": "str"},
- "message": {"key": "message", "type": "str"},
+ "mode": {"key": "mode", "type": "str"},
+ "values": {"key": "values", "type": "[int]"},
}
- def __init__(self, **kwargs: Any) -> None:
- """ """
+ def __init__(self, *, values: List[int], **kwargs: Any) -> None:
+ """
+ :keyword values: [Required] Set target lags values. Required.
+ :paramtype values: list[int]
+ """
super().__init__(**kwargs)
- self.code = None
- self.level = None
- self.message = None
+ self.mode: str = "Custom"
+ self.values = values
-class DiagnoseWorkspaceParameters(_serialization.Model):
- """Parameters to diagnose a workspace.
+class CustomTargetRollingWindowSize(TargetRollingWindowSize):
+ """CustomTargetRollingWindowSize.
- :ivar value: Value of Parameters.
- :vartype value: ~azure.mgmt.machinelearningservices.models.DiagnoseRequestProperties
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar mode: [Required] TargetRollingWindowSize detection mode. Required. Known values are:
+ "Auto" and "Custom".
+ :vartype mode: str or ~azure.mgmt.machinelearningservices.models.TargetRollingWindowSizeMode
+ :ivar value: [Required] TargetRollingWindowSize value. Required.
+ :vartype value: int
"""
+ _validation = {
+ "mode": {"required": True},
+ "value": {"required": True},
+ }
+
_attribute_map = {
- "value": {"key": "value", "type": "DiagnoseRequestProperties"},
+ "mode": {"key": "mode", "type": "str"},
+ "value": {"key": "value", "type": "int"},
}
- def __init__(self, *, value: Optional["_models.DiagnoseRequestProperties"] = None, **kwargs: Any) -> None:
+ def __init__(self, *, value: int, **kwargs: Any) -> None:
"""
- :keyword value: Value of Parameters.
- :paramtype value: ~azure.mgmt.machinelearningservices.models.DiagnoseRequestProperties
+ :keyword value: [Required] TargetRollingWindowSize value. Required.
+ :paramtype value: int
"""
super().__init__(**kwargs)
+ self.mode: str = "Custom"
self.value = value
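# Illustrative sketch: the "Custom" variants of the AutoML and forecasting settings defined
# above. Each constructor fixes ``mode`` to "Custom" and takes an explicit value; the numbers
# below are arbitrary examples.
from azure.mgmt.machinelearningservices import models

n_cross_validations = models.CustomNCrossValidations(value=5)
seasonality = models.CustomSeasonality(value=7)
target_lags = models.CustomTargetLags(values=[1, 2, 3])
rolling_window = models.CustomTargetRollingWindowSize(value=4)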
-class DistributionConfiguration(_serialization.Model):
- """Base definition for job distribution configuration.
+class DataImportSource(_serialization.Model):
+ """DataImportSource.
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
- Mpi, PyTorch, TensorFlow
+ DatabaseSource, FileSystemSource
All required parameters must be populated in order to send to Azure.
- :ivar distribution_type: [Required] Specifies the type of distribution framework. Required.
- Known values are: "PyTorch", "TensorFlow", and "Mpi".
- :vartype distribution_type: str or ~azure.mgmt.machinelearningservices.models.DistributionType
+ :ivar connection: Workspace connection for data import source storage.
+ :vartype connection: str
+ :ivar source_type: [Required] Specifies the type of data. Required. Known values are:
+ "database" and "file_system".
+ :vartype source_type: str or ~azure.mgmt.machinelearningservices.models.DataImportSourceType
"""
_validation = {
- "distribution_type": {"required": True},
+ "source_type": {"required": True},
}
_attribute_map = {
- "distribution_type": {"key": "distributionType", "type": "str"},
+ "connection": {"key": "connection", "type": "str"},
+ "source_type": {"key": "sourceType", "type": "str"},
}
- _subtype_map = {"distribution_type": {"Mpi": "Mpi", "PyTorch": "PyTorch", "TensorFlow": "TensorFlow"}}
+ _subtype_map = {"source_type": {"database": "DatabaseSource", "file_system": "FileSystemSource"}}
- def __init__(self, **kwargs: Any) -> None:
- """ """
+ def __init__(self, *, connection: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword connection: Workspace connection for data import source storage.
+ :paramtype connection: str
+ """
super().__init__(**kwargs)
- self.distribution_type: Optional[str] = None
+ self.connection = connection
+ self.source_type: Optional[str] = None
-class Docker(_serialization.Model):
- """Docker container configuration.
+class DatabaseSource(DataImportSource):
+ """DatabaseSource.
- :ivar additional_properties: Unmatched properties from the message are deserialized to this
- collection.
- :vartype additional_properties: dict[str, any]
- :ivar privileged: Indicate whether container shall run in privileged or non-privileged mode.
- :vartype privileged: bool
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar connection: Workspace connection for data import source storage.
+ :vartype connection: str
+ :ivar source_type: [Required] Specifies the type of data. Required. Known values are:
+ "database" and "file_system".
+ :vartype source_type: str or ~azure.mgmt.machinelearningservices.models.DataImportSourceType
+ :ivar query: SQL Query statement for data import Database source.
+ :vartype query: str
+ :ivar stored_procedure: SQL StoredProcedure on data import Database source.
+ :vartype stored_procedure: str
+ :ivar stored_procedure_params: SQL StoredProcedure parameters.
+ :vartype stored_procedure_params: list[dict[str, str]]
+ :ivar table_name: Name of the table on data import Database source.
+ :vartype table_name: str
"""
+ _validation = {
+ "source_type": {"required": True},
+ }
+
_attribute_map = {
- "additional_properties": {"key": "", "type": "{object}"},
- "privileged": {"key": "privileged", "type": "bool"},
+ "connection": {"key": "connection", "type": "str"},
+ "source_type": {"key": "sourceType", "type": "str"},
+ "query": {"key": "query", "type": "str"},
+ "stored_procedure": {"key": "storedProcedure", "type": "str"},
+ "stored_procedure_params": {"key": "storedProcedureParams", "type": "[{str}]"},
+ "table_name": {"key": "tableName", "type": "str"},
}
def __init__(
self,
*,
- additional_properties: Optional[Dict[str, Any]] = None,
- privileged: Optional[bool] = None,
+ connection: Optional[str] = None,
+ query: Optional[str] = None,
+ stored_procedure: Optional[str] = None,
+ stored_procedure_params: Optional[List[Dict[str, str]]] = None,
+ table_name: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword additional_properties: Unmatched properties from the message are deserialized to this
- collection.
- :paramtype additional_properties: dict[str, any]
- :keyword privileged: Indicate whether container shall run in privileged or non-privileged mode.
- :paramtype privileged: bool
- """
- super().__init__(**kwargs)
- self.additional_properties = additional_properties
- self.privileged = privileged
-
+ :keyword connection: Workspace connection for data import source storage.
+ :paramtype connection: str
+ :keyword query: SQL Query statement for data import Database source.
+ :paramtype query: str
+ :keyword stored_procedure: SQL StoredProcedure on data import Database source.
+ :paramtype stored_procedure: str
+ :keyword stored_procedure_params: SQL StoredProcedure parameters.
+ :paramtype stored_procedure_params: list[dict[str, str]]
+ :keyword table_name: Name of the table on data import Database source.
+ :paramtype table_name: str
+ """
+ super().__init__(connection=connection, **kwargs)
+ self.source_type: str = "database"
+ self.query = query
+ self.stored_procedure = stored_procedure
+ self.stored_procedure_params = stored_procedure_params
+ self.table_name = table_name
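# Illustrative sketch: a database import source. The connection name and query are placeholders;
# either a query, a stored procedure, or a table name would normally be supplied.
from azure.mgmt.machinelearningservices import models

db_source = models.DatabaseSource(
    connection="<workspace-connection-name>",
    query="SELECT TOP 100 * FROM dbo.sales",
)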
-class EncryptionKeyVaultProperties(_serialization.Model):
- """EncryptionKeyVaultProperties.
- All required parameters must be populated in order to send to Azure.
+class DatabricksSchema(_serialization.Model):
+ """DatabricksSchema.
- :ivar key_vault_arm_id: The ArmId of the keyVault where the customer owned encryption key is
- present. Required.
- :vartype key_vault_arm_id: str
- :ivar key_identifier: Key vault uri to access the encryption key. Required.
- :vartype key_identifier: str
- :ivar identity_client_id: For future use - The client id of the identity which will be used to
- access key vault.
- :vartype identity_client_id: str
+ :ivar properties: Properties of Databricks.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.DatabricksProperties
"""
- _validation = {
- "key_vault_arm_id": {"required": True},
- "key_identifier": {"required": True},
- }
-
_attribute_map = {
- "key_vault_arm_id": {"key": "keyVaultArmId", "type": "str"},
- "key_identifier": {"key": "keyIdentifier", "type": "str"},
- "identity_client_id": {"key": "identityClientId", "type": "str"},
+ "properties": {"key": "properties", "type": "DatabricksProperties"},
}
- def __init__(
- self, *, key_vault_arm_id: str, key_identifier: str, identity_client_id: Optional[str] = None, **kwargs: Any
- ) -> None:
+ def __init__(self, *, properties: Optional["_models.DatabricksProperties"] = None, **kwargs: Any) -> None:
"""
- :keyword key_vault_arm_id: The ArmId of the keyVault where the customer owned encryption key is
- present. Required.
- :paramtype key_vault_arm_id: str
- :keyword key_identifier: Key vault uri to access the encryption key. Required.
- :paramtype key_identifier: str
- :keyword identity_client_id: For future use - The client id of the identity which will be used
- to access key vault.
- :paramtype identity_client_id: str
+ :keyword properties: Properties of Databricks.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.DatabricksProperties
"""
super().__init__(**kwargs)
- self.key_vault_arm_id = key_vault_arm_id
- self.key_identifier = key_identifier
- self.identity_client_id = identity_client_id
+ self.properties = properties
-class EncryptionProperty(_serialization.Model):
- """EncryptionProperty.
+class Databricks(Compute, DatabricksSchema): # pylint: disable=too-many-instance-attributes
+ """A DataFactory compute.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
- :ivar status: Indicates whether or not the encryption is enabled for the workspace. Required.
- Known values are: "Enabled" and "Disabled".
- :vartype status: str or ~azure.mgmt.machinelearningservices.models.EncryptionStatus
- :ivar identity: The identity that will be used to access the key vault for encryption at rest.
- :vartype identity: ~azure.mgmt.machinelearningservices.models.IdentityForCmk
- :ivar key_vault_properties: Customer Key vault properties. Required.
- :vartype key_vault_properties:
- ~azure.mgmt.machinelearningservices.models.EncryptionKeyVaultProperties
+ :ivar properties: Properties of Databricks.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.DatabricksProperties
+ :ivar compute_type: The type of compute. Required. Known values are: "AKS", "Kubernetes",
+ "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight", "Databricks",
+ "DataLakeAnalytics", and "SynapseSpark".
+ :vartype compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
+ :ivar compute_location: Location for the underlying compute.
+ :vartype compute_location: str
+ :ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
+ Updating, Provisioning, Succeeded, and Failed. Known values are: "Unknown", "Updating",
+ "Creating", "Deleting", "Succeeded", "Failed", and "Canceled".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.ProvisioningState
+ :ivar description: The description of the Machine Learning compute.
+ :vartype description: str
+ :ivar created_on: The time at which the compute was created.
+ :vartype created_on: ~datetime.datetime
+ :ivar modified_on: The time at which the compute was last modified.
+ :vartype modified_on: ~datetime.datetime
+ :ivar resource_id: ARM resource id of the underlying compute.
+ :vartype resource_id: str
+ :ivar provisioning_errors: Errors during provisioning.
+ :vartype provisioning_errors: list[~azure.mgmt.machinelearningservices.models.ErrorResponse]
+ :ivar is_attached_compute: Indicates whether the compute was provisioned by the user and
+ brought from outside if true, or provisioned by the machine learning service if false.
+ :vartype is_attached_compute: bool
+ :ivar disable_local_auth: Opt out of local authentication and ensure customers can use only MSI
+ and AAD for authentication.
+ :vartype disable_local_auth: bool
"""
_validation = {
- "status": {"required": True},
- "key_vault_properties": {"required": True},
+ "compute_type": {"required": True},
+ "provisioning_state": {"readonly": True},
+ "created_on": {"readonly": True},
+ "modified_on": {"readonly": True},
+ "provisioning_errors": {"readonly": True},
+ "is_attached_compute": {"readonly": True},
}
_attribute_map = {
- "status": {"key": "status", "type": "str"},
- "identity": {"key": "identity", "type": "IdentityForCmk"},
- "key_vault_properties": {"key": "keyVaultProperties", "type": "EncryptionKeyVaultProperties"},
+ "properties": {"key": "properties", "type": "DatabricksProperties"},
+ "compute_type": {"key": "computeType", "type": "str"},
+ "compute_location": {"key": "computeLocation", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "description": {"key": "description", "type": "str"},
+ "created_on": {"key": "createdOn", "type": "iso-8601"},
+ "modified_on": {"key": "modifiedOn", "type": "iso-8601"},
+ "resource_id": {"key": "resourceId", "type": "str"},
+ "provisioning_errors": {"key": "provisioningErrors", "type": "[ErrorResponse]"},
+ "is_attached_compute": {"key": "isAttachedCompute", "type": "bool"},
+ "disable_local_auth": {"key": "disableLocalAuth", "type": "bool"},
}
def __init__(
self,
*,
- status: Union[str, "_models.EncryptionStatus"],
- key_vault_properties: "_models.EncryptionKeyVaultProperties",
- identity: Optional["_models.IdentityForCmk"] = None,
+ properties: Optional["_models.DatabricksProperties"] = None,
+ compute_location: Optional[str] = None,
+ description: Optional[str] = None,
+ resource_id: Optional[str] = None,
+ disable_local_auth: Optional[bool] = None,
**kwargs: Any
) -> None:
"""
- :keyword status: Indicates whether or not the encryption is enabled for the workspace.
- Required. Known values are: "Enabled" and "Disabled".
- :paramtype status: str or ~azure.mgmt.machinelearningservices.models.EncryptionStatus
- :keyword identity: The identity that will be used to access the key vault for encryption at
- rest.
- :paramtype identity: ~azure.mgmt.machinelearningservices.models.IdentityForCmk
- :keyword key_vault_properties: Customer Key vault properties. Required.
- :paramtype key_vault_properties:
- ~azure.mgmt.machinelearningservices.models.EncryptionKeyVaultProperties
+ :keyword properties: Properties of Databricks.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.DatabricksProperties
+ :keyword compute_location: Location for the underlying compute.
+ :paramtype compute_location: str
+ :keyword description: The description of the Machine Learning compute.
+ :paramtype description: str
+ :keyword resource_id: ARM resource id of the underlying compute.
+ :paramtype resource_id: str
+ :keyword disable_local_auth: Opt out of local authentication and ensure customers can use only
+ MSI and AAD for authentication.
+ :paramtype disable_local_auth: bool
"""
- super().__init__(**kwargs)
- self.status = status
- self.identity = identity
- self.key_vault_properties = key_vault_properties
+ super().__init__(
+ compute_location=compute_location,
+ description=description,
+ resource_id=resource_id,
+ disable_local_auth=disable_local_auth,
+ properties=properties,
+ **kwargs
+ )
+ self.properties = properties
+ self.compute_type: str = "Databricks"
+ self.compute_location = compute_location
+ self.provisioning_state = None
+ self.description = description
+ self.created_on = None
+ self.modified_on = None
+ self.resource_id = resource_id
+ self.provisioning_errors = None
+ self.is_attached_compute = None
+ self.disable_local_auth = disable_local_auth
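# Illustrative sketch: a Databricks compute definition. The resource ID is a placeholder, and
# the DatabricksProperties keyword names (databricks_access_token, workspace_url) are
# assumptions since that model is not defined in this hunk.
from azure.mgmt.machinelearningservices import models

databricks_compute = models.Databricks(
    description="Attached Databricks workspace",
    resource_id=(
        "/subscriptions/<sub>/resourceGroups/<rg>/providers/"
        "Microsoft.Databricks/workspaces/<dbx>"
    ),
    properties=models.DatabricksProperties(
        databricks_access_token="<token>",
        workspace_url="https://adb-000.azuredatabricks.net",
    ),
)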
-class Endpoint(_serialization.Model):
- """Describes the endpoint configuration for the container.
+class DatabricksComputeSecretsProperties(_serialization.Model):
+ """Properties of Databricks Compute Secrets.
- :ivar protocol: Protocol over which communication will happen over this endpoint. Known values
- are: "tcp", "udp", and "http".
- :vartype protocol: str or ~azure.mgmt.machinelearningservices.models.Protocol
- :ivar name: Name of the Endpoint.
- :vartype name: str
- :ivar target: Application port inside the container.
- :vartype target: int
- :ivar published: Port over which the application is exposed from container.
- :vartype published: int
- :ivar host_ip: Host IP over which the application is exposed from the container.
- :vartype host_ip: str
+ :ivar databricks_access_token: Access token for the Databricks account.
+ :vartype databricks_access_token: str
"""
_attribute_map = {
- "protocol": {"key": "protocol", "type": "str"},
- "name": {"key": "name", "type": "str"},
- "target": {"key": "target", "type": "int"},
- "published": {"key": "published", "type": "int"},
- "host_ip": {"key": "hostIp", "type": "str"},
+ "databricks_access_token": {"key": "databricksAccessToken", "type": "str"},
}
- def __init__(
- self,
- *,
- protocol: Union[str, "_models.Protocol"] = "tcp",
- name: Optional[str] = None,
- target: Optional[int] = None,
- published: Optional[int] = None,
- host_ip: Optional[str] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, databricks_access_token: Optional[str] = None, **kwargs: Any) -> None:
"""
- :keyword protocol: Protocol over which communication will happen over this endpoint. Known
- values are: "tcp", "udp", and "http".
- :paramtype protocol: str or ~azure.mgmt.machinelearningservices.models.Protocol
- :keyword name: Name of the Endpoint.
- :paramtype name: str
- :keyword target: Application port inside the container.
- :paramtype target: int
- :keyword published: Port over which the application is exposed from container.
- :paramtype published: int
- :keyword host_ip: Host IP over which the application is exposed from the container.
- :paramtype host_ip: str
+ :keyword databricks_access_token: Access token for the Databricks account.
+ :paramtype databricks_access_token: str
"""
super().__init__(**kwargs)
- self.protocol = protocol
- self.name = name
- self.target = target
- self.published = published
- self.host_ip = host_ip
+ self.databricks_access_token = databricks_access_token
-class EndpointAuthKeys(_serialization.Model):
- """Keys for endpoint authentication.
+class DatabricksComputeSecrets(ComputeSecrets, DatabricksComputeSecretsProperties):
+ """Secrets related to a Machine Learning compute based on Databricks.
- :ivar primary_key: The primary key.
- :vartype primary_key: str
- :ivar secondary_key: The secondary key.
- :vartype secondary_key: str
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar databricks_access_token: Access token for the Databricks account.
+ :vartype databricks_access_token: str
+ :ivar compute_type: The type of compute. Required. Known values are: "AKS", "Kubernetes",
+ "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight", "Databricks",
+ "DataLakeAnalytics", and "SynapseSpark".
+ :vartype compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
"""
+ _validation = {
+ "compute_type": {"required": True},
+ }
+
_attribute_map = {
- "primary_key": {"key": "primaryKey", "type": "str"},
- "secondary_key": {"key": "secondaryKey", "type": "str"},
+ "databricks_access_token": {"key": "databricksAccessToken", "type": "str"},
+ "compute_type": {"key": "computeType", "type": "str"},
}
- def __init__(
- self, *, primary_key: Optional[str] = None, secondary_key: Optional[str] = None, **kwargs: Any
- ) -> None:
+ def __init__(self, *, databricks_access_token: Optional[str] = None, **kwargs: Any) -> None:
"""
- :keyword primary_key: The primary key.
- :paramtype primary_key: str
- :keyword secondary_key: The secondary key.
- :paramtype secondary_key: str
+ :keyword databricks_access_token: Access token for the Databricks account.
+ :paramtype databricks_access_token: str
"""
- super().__init__(**kwargs)
- self.primary_key = primary_key
- self.secondary_key = secondary_key
+ super().__init__(databricks_access_token=databricks_access_token, **kwargs)
+ self.databricks_access_token = databricks_access_token
+ self.compute_type: str = "Databricks"
-class EndpointAuthToken(_serialization.Model):
- """Service Token.
+class DatabricksProperties(_serialization.Model):
+ """Properties of Databricks.
- :ivar access_token: Access token for endpoint authentication.
- :vartype access_token: str
- :ivar expiry_time_utc: Access token expiry time (UTC).
- :vartype expiry_time_utc: int
- :ivar refresh_after_time_utc: Refresh access token after time (UTC).
- :vartype refresh_after_time_utc: int
- :ivar token_type: Access token type.
- :vartype token_type: str
+ :ivar databricks_access_token: Databricks access token.
+ :vartype databricks_access_token: str
+ :ivar workspace_url: Workspace URL.
+ :vartype workspace_url: str
"""
_attribute_map = {
- "access_token": {"key": "accessToken", "type": "str"},
- "expiry_time_utc": {"key": "expiryTimeUtc", "type": "int"},
- "refresh_after_time_utc": {"key": "refreshAfterTimeUtc", "type": "int"},
- "token_type": {"key": "tokenType", "type": "str"},
+ "databricks_access_token": {"key": "databricksAccessToken", "type": "str"},
+ "workspace_url": {"key": "workspaceUrl", "type": "str"},
}
def __init__(
- self,
- *,
- access_token: Optional[str] = None,
- expiry_time_utc: int = 0,
- refresh_after_time_utc: int = 0,
- token_type: Optional[str] = None,
- **kwargs: Any
+ self, *, databricks_access_token: Optional[str] = None, workspace_url: Optional[str] = None, **kwargs: Any
) -> None:
"""
- :keyword access_token: Access token for endpoint authentication.
- :paramtype access_token: str
- :keyword expiry_time_utc: Access token expiry time (UTC).
- :paramtype expiry_time_utc: int
- :keyword refresh_after_time_utc: Refresh access token after time (UTC).
- :paramtype refresh_after_time_utc: int
- :keyword token_type: Access token type.
- :paramtype token_type: str
+ :keyword databricks_access_token: Databricks access token.
+ :paramtype databricks_access_token: str
+ :keyword workspace_url: Workspace URL.
+ :paramtype workspace_url: str
"""
super().__init__(**kwargs)
- self.access_token = access_token
- self.expiry_time_utc = expiry_time_utc
- self.refresh_after_time_utc = refresh_after_time_utc
- self.token_type = token_type
-
-
-class ScheduleActionBase(_serialization.Model):
- """ScheduleActionBase.
-
- You probably want to use the sub-classes and not this class directly. Known sub-classes are:
- JobScheduleAction, EndpointScheduleAction
-
- All required parameters must be populated in order to send to Azure.
-
- :ivar action_type: [Required] Specifies the action type of the schedule. Required. Known values
- are: "CreateJob" and "InvokeBatchEndpoint".
- :vartype action_type: str or ~azure.mgmt.machinelearningservices.models.ScheduleActionType
- """
-
- _validation = {
- "action_type": {"required": True},
- }
-
- _attribute_map = {
- "action_type": {"key": "actionType", "type": "str"},
- }
-
- _subtype_map = {"action_type": {"CreateJob": "JobScheduleAction", "InvokeBatchEndpoint": "EndpointScheduleAction"}}
-
- def __init__(self, **kwargs: Any) -> None:
- """ """
- super().__init__(**kwargs)
- self.action_type: Optional[str] = None
+ self.databricks_access_token = databricks_access_token
+ self.workspace_url = workspace_url
-class EndpointScheduleAction(ScheduleActionBase):
- """EndpointScheduleAction.
+class DataCollector(_serialization.Model):
+ """DataCollector.
All required parameters must be populated in order to send to Azure.
- :ivar action_type: [Required] Specifies the action type of the schedule. Required. Known values
- are: "CreateJob" and "InvokeBatchEndpoint".
- :vartype action_type: str or ~azure.mgmt.machinelearningservices.models.ScheduleActionType
- :ivar endpoint_invocation_definition: [Required] Defines Schedule action definition details.
-
-
- .. raw:: html
-
- . Required.
- :vartype endpoint_invocation_definition: JSON
+ :ivar collections: [Required] The collection configuration. Each collection has its own
+ configuration for collecting model data, and the name of a collection can be an arbitrary
+ string. The model data collector can be used for payload logging, custom logging, or both.
+ The collections named request and response are reserved for payload logging; all others are
+ for custom logging. Required.
+ :vartype collections: dict[str, ~azure.mgmt.machinelearningservices.models.Collection]
+ :ivar request_logging: The request logging configuration for the model data collector; it
+ includes advanced logging settings for all collections and is optional.
+ :vartype request_logging: ~azure.mgmt.machinelearningservices.models.RequestLogging
+ :ivar rolling_rate: When model data is collected to blob storage, the data is rolled to a
+ different path to avoid logging everything into a single blob file.
+ If the rolling rate is hour, all data will be collected in the blob path /yyyy/MM/dd/HH/.
+ If it is day, all data will be collected in the blob path /yyyy/MM/dd/.
+ Another benefit of the rolling path is that the model monitoring UI can select a time range
+ of data very quickly. Known values are: "Year", "Month", "Day", "Hour", and "Minute".
+ :vartype rolling_rate: str or ~azure.mgmt.machinelearningservices.models.RollingRateType
"""
_validation = {
- "action_type": {"required": True},
- "endpoint_invocation_definition": {"required": True},
+ "collections": {"required": True},
}
_attribute_map = {
- "action_type": {"key": "actionType", "type": "str"},
- "endpoint_invocation_definition": {"key": "endpointInvocationDefinition", "type": "object"},
+ "collections": {"key": "collections", "type": "{Collection}"},
+ "request_logging": {"key": "requestLogging", "type": "RequestLogging"},
+ "rolling_rate": {"key": "rollingRate", "type": "str"},
}
- def __init__(self, *, endpoint_invocation_definition: JSON, **kwargs: Any) -> None:
- """
- :keyword endpoint_invocation_definition: [Required] Defines Schedule action definition details.
-
-
- .. raw:: html
-
- . Required.
- :paramtype endpoint_invocation_definition: JSON
+ def __init__(
+ self,
+ *,
+ collections: Dict[str, "_models.Collection"],
+ request_logging: Optional["_models.RequestLogging"] = None,
+ rolling_rate: Optional[Union[str, "_models.RollingRateType"]] = None,
+ **kwargs: Any
+ ) -> None:
"""
- super().__init__(**kwargs)
- self.action_type: str = "InvokeBatchEndpoint"
- self.endpoint_invocation_definition = endpoint_invocation_definition
-
-
-class EnvironmentContainer(Resource):
+ :keyword collections: [Required] The collection configuration. Each collection has its own
+ configuration for collecting model data, and the name of a collection can be an arbitrary
+ string. The model data collector can be used for payload logging, custom logging, or both.
+ The collections named request and response are reserved for payload logging; all others are
+ for custom logging. Required.
+ :paramtype collections: dict[str, ~azure.mgmt.machinelearningservices.models.Collection]
+ :keyword request_logging: The request logging configuration for the model data collector; it
+ includes advanced logging settings for all collections and is optional.
+ :paramtype request_logging: ~azure.mgmt.machinelearningservices.models.RequestLogging
+ :keyword rolling_rate: When model data is collected to blob storage, the data is rolled to a
+ different path to avoid logging everything into a single blob file.
+ If the rolling rate is hour, all data will be collected in the blob path /yyyy/MM/dd/HH/.
+ If it is day, all data will be collected in the blob path /yyyy/MM/dd/.
+ Another benefit of the rolling path is that the model monitoring UI can select a time range
+ of data very quickly. Known values are: "Year", "Month", "Day", "Hour", and "Minute".
+ :paramtype rolling_rate: str or ~azure.mgmt.machinelearningservices.models.RollingRateType
+ """
+ super().__init__(**kwargs)
+ self.collections = collections
+ self.request_logging = request_logging
+ self.rolling_rate = rolling_rate
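A sketch of how the DataCollector model above might be populated; it assumes Collection instances can be constructed with defaults and configured separately, since Collection's definition is outside this hunk.

from azure.mgmt.machinelearningservices.models import Collection, DataCollector

# "request" and "response" are the collection names reserved for payload logging.
data_collector = DataCollector(
    collections={
        "request": Collection(),   # assumed all-optional constructor; configure as needed
        "response": Collection(),
    },
    rolling_rate="Hour",  # rolls collected data into blob paths of the form /yyyy/MM/dd/HH/
)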
+
+
+class DataContainer(ProxyResource):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
@@ -8709,7 +9947,7 @@ class EnvironmentContainer(Resource):
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
:ivar properties: [Required] Additional attributes of the entity. Required.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.EnvironmentContainerProperties
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.DataContainerProperties
"""
_validation = {
@@ -8725,24 +9963,25 @@ class EnvironmentContainer(Resource):
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
- "properties": {"key": "properties", "type": "EnvironmentContainerProperties"},
+ "properties": {"key": "properties", "type": "DataContainerProperties"},
}
- def __init__(self, *, properties: "_models.EnvironmentContainerProperties", **kwargs: Any) -> None:
+ def __init__(self, *, properties: "_models.DataContainerProperties", **kwargs: Any) -> None:
"""
:keyword properties: [Required] Additional attributes of the entity. Required.
- :paramtype properties:
- ~azure.mgmt.machinelearningservices.models.EnvironmentContainerProperties
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.DataContainerProperties
"""
super().__init__(**kwargs)
self.properties = properties
-class EnvironmentContainerProperties(AssetContainer):
- """Container for environment specification versions.
+class DataContainerProperties(AssetContainer):
+ """Container for data asset versions.
Variables are only populated by the server, and will be ignored when sending a request.
+ All required parameters must be populated in order to send to Azure.
+
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
@@ -8755,16 +9994,15 @@ class EnvironmentContainerProperties(AssetContainer):
:vartype latest_version: str
:ivar next_version: The next auto incremental version.
:vartype next_version: str
- :ivar provisioning_state: Provisioning state for the environment container. Known values are:
- "Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.AssetProvisioningState
+ :ivar data_type: [Required] Specifies the type of data. Required. Known values are: "uri_file",
+ "uri_folder", and "mltable".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.DataType
"""
_validation = {
"latest_version": {"readonly": True},
"next_version": {"readonly": True},
- "provisioning_state": {"readonly": True},
+ "data_type": {"required": True},
}
_attribute_map = {
@@ -8774,12 +10012,13 @@ class EnvironmentContainerProperties(AssetContainer):
"is_archived": {"key": "isArchived", "type": "bool"},
"latest_version": {"key": "latestVersion", "type": "str"},
"next_version": {"key": "nextVersion", "type": "str"},
- "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "data_type": {"key": "dataType", "type": "str"},
}
def __init__(
self,
*,
+ data_type: Union[str, "_models.DataType"],
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
@@ -8795,139 +10034,245 @@ def __init__(
:paramtype tags: dict[str, str]
:keyword is_archived: Is the asset archived?.
:paramtype is_archived: bool
+ :keyword data_type: [Required] Specifies the type of data. Required. Known values are:
+ "uri_file", "uri_folder", and "mltable".
+ :paramtype data_type: str or ~azure.mgmt.machinelearningservices.models.DataType
"""
super().__init__(description=description, properties=properties, tags=tags, is_archived=is_archived, **kwargs)
- self.provisioning_state = None
+ self.data_type = data_type
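A construction sketch pairing the two models above; the values are illustrative.

from azure.mgmt.machinelearningservices.models import DataContainer, DataContainerProperties

data_container = DataContainer(
    properties=DataContainerProperties(
        data_type="uri_file",  # known values: "uri_file", "uri_folder", "mltable"
        description="Raw telemetry files",
        tags={"team": "ml-platform"},
    )
)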
-class EnvironmentContainerResourceArmPaginatedResult(_serialization.Model):
- """A paginated list of EnvironmentContainer entities.
+class DataContainerResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of DataContainer entities.
- :ivar next_link: The link to the next page of EnvironmentContainer objects. If null, there are
- no additional pages.
+ :ivar next_link: The link to the next page of DataContainer objects. If null, there are no
+ additional pages.
:vartype next_link: str
- :ivar value: An array of objects of type EnvironmentContainer.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.EnvironmentContainer]
+ :ivar value: An array of objects of type DataContainer.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.DataContainer]
"""
_attribute_map = {
"next_link": {"key": "nextLink", "type": "str"},
- "value": {"key": "value", "type": "[EnvironmentContainer]"},
+ "value": {"key": "value", "type": "[DataContainer]"},
}
def __init__(
- self,
- *,
- next_link: Optional[str] = None,
- value: Optional[List["_models.EnvironmentContainer"]] = None,
- **kwargs: Any
+ self, *, next_link: Optional[str] = None, value: Optional[List["_models.DataContainer"]] = None, **kwargs: Any
) -> None:
"""
- :keyword next_link: The link to the next page of EnvironmentContainer objects. If null, there
- are no additional pages.
+ :keyword next_link: The link to the next page of DataContainer objects. If null, there are no
+ additional pages.
:paramtype next_link: str
- :keyword value: An array of objects of type EnvironmentContainer.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.EnvironmentContainer]
+ :keyword value: An array of objects of type DataContainer.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.DataContainer]
"""
super().__init__(**kwargs)
self.next_link = next_link
self.value = value
-class EnvironmentVariable(_serialization.Model):
- """Environment Variables for the container.
+class DataDriftMonitoringSignal(MonitoringSignalBase):
+ """DataDriftMonitoringSignal.
- :ivar additional_properties: Unmatched properties from the message are deserialized to this
- collection.
- :vartype additional_properties: dict[str, any]
- :ivar type: Type of the Environment Variable. Possible values are: local - For local variable.
- "local"
- :vartype type: str or ~azure.mgmt.machinelearningservices.models.EnvironmentVariableType
- :ivar value: Value of the Environment variable.
- :vartype value: str
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar notification_types: The current notification mode for this signal.
+ :vartype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
+ :vartype properties: dict[str, str]
+ :ivar signal_type: [Required] Specifies the type of signal to monitor. Required. Known values
+ are: "DataDrift", "PredictionDrift", "DataQuality", "FeatureAttributionDrift", "Custom",
+ "ModelPerformance", "GenerationSafetyQuality", and "GenerationTokenStatistics".
+ :vartype signal_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringSignalType
+ :ivar data_segment: The data segment used for scoping on a subset of the data population.
+ :vartype data_segment: ~azure.mgmt.machinelearningservices.models.MonitoringDataSegment
+ :ivar feature_data_type_override: A dictionary that maps feature names to their respective data
+ types.
+ :vartype feature_data_type_override: dict[str, str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType]
+ :ivar feature_importance_settings: The settings for computing feature importance.
+ :vartype feature_importance_settings:
+ ~azure.mgmt.machinelearningservices.models.FeatureImportanceSettings
+ :ivar features: The feature filter which identifies which feature to calculate drift over.
+ :vartype features: ~azure.mgmt.machinelearningservices.models.MonitoringFeatureFilterBase
+ :ivar metric_thresholds: [Required] A list of metrics to calculate and their associated
+ thresholds. Required.
+ :vartype metric_thresholds:
+ list[~azure.mgmt.machinelearningservices.models.DataDriftMetricThresholdBase]
+ :ivar production_data: [Required] The data for which drift will be calculated. Required.
+ :vartype production_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
+ :ivar reference_data: [Required] The data to calculate drift against. Required.
+ :vartype reference_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
"""
+ _validation = {
+ "signal_type": {"required": True},
+ "metric_thresholds": {"required": True},
+ "production_data": {"required": True},
+ "reference_data": {"required": True},
+ }
+
_attribute_map = {
- "additional_properties": {"key": "", "type": "{object}"},
- "type": {"key": "type", "type": "str"},
- "value": {"key": "value", "type": "str"},
+ "notification_types": {"key": "notificationTypes", "type": "[str]"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "signal_type": {"key": "signalType", "type": "str"},
+ "data_segment": {"key": "dataSegment", "type": "MonitoringDataSegment"},
+ "feature_data_type_override": {"key": "featureDataTypeOverride", "type": "{str}"},
+ "feature_importance_settings": {"key": "featureImportanceSettings", "type": "FeatureImportanceSettings"},
+ "features": {"key": "features", "type": "MonitoringFeatureFilterBase"},
+ "metric_thresholds": {"key": "metricThresholds", "type": "[DataDriftMetricThresholdBase]"},
+ "production_data": {"key": "productionData", "type": "MonitoringInputDataBase"},
+ "reference_data": {"key": "referenceData", "type": "MonitoringInputDataBase"},
}
def __init__(
self,
*,
- additional_properties: Optional[Dict[str, Any]] = None,
- type: Union[str, "_models.EnvironmentVariableType"] = "local",
- value: Optional[str] = None,
+ metric_thresholds: List["_models.DataDriftMetricThresholdBase"],
+ production_data: "_models.MonitoringInputDataBase",
+ reference_data: "_models.MonitoringInputDataBase",
+ notification_types: Optional[List[Union[str, "_models.MonitoringNotificationType"]]] = None,
+ properties: Optional[Dict[str, str]] = None,
+ data_segment: Optional["_models.MonitoringDataSegment"] = None,
+ feature_data_type_override: Optional[Dict[str, Union[str, "_models.MonitoringFeatureDataType"]]] = None,
+ feature_importance_settings: Optional["_models.FeatureImportanceSettings"] = None,
+ features: Optional["_models.MonitoringFeatureFilterBase"] = None,
**kwargs: Any
) -> None:
"""
- :keyword additional_properties: Unmatched properties from the message are deserialized to this
- collection.
- :paramtype additional_properties: dict[str, any]
- :keyword type: Type of the Environment Variable. Possible values are: local - For local
- variable. "local"
- :paramtype type: str or ~azure.mgmt.machinelearningservices.models.EnvironmentVariableType
- :keyword value: Value of the Environment variable.
- :paramtype value: str
- """
- super().__init__(**kwargs)
- self.additional_properties = additional_properties
- self.type = type
- self.value = value
+ :keyword notification_types: The current notification mode for this signal.
+ :paramtype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
+ :paramtype properties: dict[str, str]
+ :keyword data_segment: The data segment used for scoping on a subset of the data population.
+ :paramtype data_segment: ~azure.mgmt.machinelearningservices.models.MonitoringDataSegment
+ :keyword feature_data_type_override: A dictionary that maps feature names to their respective
+ data types.
+ :paramtype feature_data_type_override: dict[str, str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType]
+ :keyword feature_importance_settings: The settings for computing feature importance.
+ :paramtype feature_importance_settings:
+ ~azure.mgmt.machinelearningservices.models.FeatureImportanceSettings
+ :keyword features: The feature filter which identifies which feature to calculate drift over.
+ :paramtype features: ~azure.mgmt.machinelearningservices.models.MonitoringFeatureFilterBase
+ :keyword metric_thresholds: [Required] A list of metrics to calculate and their associated
+ thresholds. Required.
+ :paramtype metric_thresholds:
+ list[~azure.mgmt.machinelearningservices.models.DataDriftMetricThresholdBase]
+ :keyword production_data: [Required] The data for which drift will be calculated. Required.
+ :paramtype production_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
+ :keyword reference_data: [Required] The data to calculate drift against. Required.
+ :paramtype reference_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
+ """
+ super().__init__(notification_types=notification_types, properties=properties, **kwargs)
+ self.signal_type: str = "DataDrift"
+ self.data_segment = data_segment
+ self.feature_data_type_override = feature_data_type_override
+ self.feature_importance_settings = feature_importance_settings
+ self.features = features
+ self.metric_thresholds = metric_thresholds
+ self.production_data = production_data
+ self.reference_data = reference_data
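A sketch of assembling the drift signal above; production_input, reference_input, and drift_thresholds stand in for instances of the concrete MonitoringInputDataBase and DataDriftMetricThresholdBase subclasses, which are defined outside this hunk.

from azure.mgmt.machinelearningservices.models import DataDriftMonitoringSignal

drift_signal = DataDriftMonitoringSignal(
    production_data=production_input,    # assumed MonitoringInputDataBase subclass instance
    reference_data=reference_input,      # assumed MonitoringInputDataBase subclass instance
    metric_thresholds=drift_thresholds,  # assumed list of DataDriftMetricThresholdBase subclasses
    properties={"owner": "ml-platform"},
)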
-class EnvironmentVersion(Resource):
- """Azure Resource Manager resource envelope.
+class DataFactory(Compute):
+ """A DataFactory compute.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
- :ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
- :vartype id: str
- :ivar name: The name of the resource.
- :vartype name: str
- :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
- "Microsoft.Storage/storageAccounts".
- :vartype type: str
- :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
- information.
- :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
- :ivar properties: [Required] Additional attributes of the entity. Required.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.EnvironmentVersionProperties
+ :ivar compute_type: The type of compute. Required. Known values are: "AKS", "Kubernetes",
+ "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight", "Databricks",
+ "DataLakeAnalytics", and "SynapseSpark".
+ :vartype compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
+ :ivar compute_location: Location for the underlying compute.
+ :vartype compute_location: str
+ :ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
+ Updating, Provisioning, Succeeded, and Failed. Known values are: "Unknown", "Updating",
+ "Creating", "Deleting", "Succeeded", "Failed", and "Canceled".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.ProvisioningState
+ :ivar description: The description of the Machine Learning compute.
+ :vartype description: str
+ :ivar created_on: The time at which the compute was created.
+ :vartype created_on: ~datetime.datetime
+ :ivar modified_on: The time at which the compute was last modified.
+ :vartype modified_on: ~datetime.datetime
+ :ivar resource_id: ARM resource id of the underlying compute.
+ :vartype resource_id: str
+ :ivar provisioning_errors: Errors during provisioning.
+ :vartype provisioning_errors: list[~azure.mgmt.machinelearningservices.models.ErrorResponse]
+ :ivar is_attached_compute: Indicating whether the compute was provisioned by user and brought
+ from outside if true, or machine learning service provisioned it if false.
+ :vartype is_attached_compute: bool
+ :ivar disable_local_auth: Opt-out of local authentication and ensure customers can use only MSI
+ and AAD exclusively for authentication.
+ :vartype disable_local_auth: bool
"""
_validation = {
- "id": {"readonly": True},
- "name": {"readonly": True},
- "type": {"readonly": True},
- "system_data": {"readonly": True},
- "properties": {"required": True},
+ "compute_type": {"required": True},
+ "provisioning_state": {"readonly": True},
+ "created_on": {"readonly": True},
+ "modified_on": {"readonly": True},
+ "provisioning_errors": {"readonly": True},
+ "is_attached_compute": {"readonly": True},
}
_attribute_map = {
- "id": {"key": "id", "type": "str"},
- "name": {"key": "name", "type": "str"},
- "type": {"key": "type", "type": "str"},
- "system_data": {"key": "systemData", "type": "SystemData"},
- "properties": {"key": "properties", "type": "EnvironmentVersionProperties"},
+ "compute_type": {"key": "computeType", "type": "str"},
+ "compute_location": {"key": "computeLocation", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "description": {"key": "description", "type": "str"},
+ "created_on": {"key": "createdOn", "type": "iso-8601"},
+ "modified_on": {"key": "modifiedOn", "type": "iso-8601"},
+ "resource_id": {"key": "resourceId", "type": "str"},
+ "provisioning_errors": {"key": "provisioningErrors", "type": "[ErrorResponse]"},
+ "is_attached_compute": {"key": "isAttachedCompute", "type": "bool"},
+ "disable_local_auth": {"key": "disableLocalAuth", "type": "bool"},
}
- def __init__(self, *, properties: "_models.EnvironmentVersionProperties", **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ compute_location: Optional[str] = None,
+ description: Optional[str] = None,
+ resource_id: Optional[str] = None,
+ disable_local_auth: Optional[bool] = None,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword properties: [Required] Additional attributes of the entity. Required.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.EnvironmentVersionProperties
+ :keyword compute_location: Location for the underlying compute.
+ :paramtype compute_location: str
+ :keyword description: The description of the Machine Learning compute.
+ :paramtype description: str
+ :keyword resource_id: ARM resource id of the underlying compute.
+ :paramtype resource_id: str
+ :keyword disable_local_auth: Opt-out of local authentication and ensure customers can use only
+ MSI and AAD exclusively for authentication.
+ :paramtype disable_local_auth: bool
"""
- super().__init__(**kwargs)
- self.properties = properties
+ super().__init__(
+ compute_location=compute_location,
+ description=description,
+ resource_id=resource_id,
+ disable_local_auth=disable_local_auth,
+ **kwargs
+ )
+ self.compute_type: str = "DataFactory"
-class EnvironmentVersionProperties(AssetBase): # pylint: disable=too-many-instance-attributes
- """Environment version details.
+class DataVersionBaseProperties(AssetBase):
+ """Data version base definition.
- Variables are only populated by the server, and will be ignored when sending a request.
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ MLTableData, UriFileDataVersion, UriFolderDataVersion
+
+ All required parameters must be populated in order to send to Azure.
:ivar description: The asset description text.
:vartype description: str
@@ -8935,93 +10280,60 @@ class EnvironmentVersionProperties(AssetBase): # pylint: disable=too-many-insta
:vartype properties: dict[str, str]
:ivar tags: Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
- :ivar is_anonymous: If the name version are system generated (anonymous registration).
+ :ivar auto_delete_setting: Specifies the lifecycle setting of managed data asset.
+ :vartype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :ivar is_anonymous: If the name and version are system generated (anonymous registration). For
+ types where Stage is defined, when Stage is provided it will be used to populate IsAnonymous.
:vartype is_anonymous: bool
- :ivar is_archived: Is the asset archived?.
+ :ivar is_archived: Is the asset archived? For types where Stage is defined, when Stage is
+ provided it will be used to populate IsArchived.
:vartype is_archived: bool
- :ivar auto_rebuild: Defines if image needs to be rebuilt based on base image changes. Known
- values are: "Disabled" and "OnBaseImageUpdate".
- :vartype auto_rebuild: str or ~azure.mgmt.machinelearningservices.models.AutoRebuildSetting
- :ivar build: Configuration settings for Docker build context.
- :vartype build: ~azure.mgmt.machinelearningservices.models.BuildContext
- :ivar conda_file: Standard configuration file used by Conda that lets you install any kind of
- package, including Python, R, and C/C++ packages.
-
-
- .. raw:: html
-
- .
- :vartype conda_file: str
- :ivar environment_type: Environment type is either user managed or curated by the Azure ML
- service
-
-
- .. raw:: html
-
- . Known values are: "Curated" and "UserCreated".
- :vartype environment_type: str or ~azure.mgmt.machinelearningservices.models.EnvironmentType
- :ivar image: Name of the image that will be used for the environment.
-
-
- .. raw:: html
-
- .
- :vartype image: str
- :ivar inference_config: Defines configuration specific to inference.
- :vartype inference_config:
- ~azure.mgmt.machinelearningservices.models.InferenceContainerProperties
- :ivar os_type: The OS type of the environment. Known values are: "Linux" and "Windows".
- :vartype os_type: str or ~azure.mgmt.machinelearningservices.models.OperatingSystemType
- :ivar provisioning_state: Provisioning state for the environment version. Known values are:
- "Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.AssetProvisioningState
- :ivar stage: Stage in the environment lifecycle assigned to this environment.
+ :ivar data_type: [Required] Specifies the type of data. Required. Known values are: "uri_file",
+ "uri_folder", and "mltable".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.DataType
+ :ivar data_uri: [Required] Uri of the data. Example:
+ https://go.microsoft.com/fwlink/?linkid=2202330. Required.
+ :vartype data_uri: str
+ :ivar intellectual_property: Intellectual Property details. Used if data is an Intellectual
+ Property.
+ :vartype intellectual_property: ~azure.mgmt.machinelearningservices.models.IntellectualProperty
+ :ivar stage: Stage in the data lifecycle assigned to this data asset.
:vartype stage: str
"""
_validation = {
- "environment_type": {"readonly": True},
- "provisioning_state": {"readonly": True},
+ "data_type": {"required": True},
+ "data_uri": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
}
_attribute_map = {
"description": {"key": "description", "type": "str"},
"properties": {"key": "properties", "type": "{str}"},
"tags": {"key": "tags", "type": "{str}"},
+ "auto_delete_setting": {"key": "autoDeleteSetting", "type": "AutoDeleteSetting"},
"is_anonymous": {"key": "isAnonymous", "type": "bool"},
"is_archived": {"key": "isArchived", "type": "bool"},
- "auto_rebuild": {"key": "autoRebuild", "type": "str"},
- "build": {"key": "build", "type": "BuildContext"},
- "conda_file": {"key": "condaFile", "type": "str"},
- "environment_type": {"key": "environmentType", "type": "str"},
- "image": {"key": "image", "type": "str"},
- "inference_config": {"key": "inferenceConfig", "type": "InferenceContainerProperties"},
- "os_type": {"key": "osType", "type": "str"},
- "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "data_type": {"key": "dataType", "type": "str"},
+ "data_uri": {"key": "dataUri", "type": "str"},
+ "intellectual_property": {"key": "intellectualProperty", "type": "IntellectualProperty"},
"stage": {"key": "stage", "type": "str"},
}
+ _subtype_map = {
+ "data_type": {"mltable": "MLTableData", "uri_file": "UriFileDataVersion", "uri_folder": "UriFolderDataVersion"}
+ }
+
def __init__(
self,
*,
+ data_uri: str,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
+ auto_delete_setting: Optional["_models.AutoDeleteSetting"] = None,
is_anonymous: bool = False,
is_archived: bool = False,
- auto_rebuild: Optional[Union[str, "_models.AutoRebuildSetting"]] = None,
- build: Optional["_models.BuildContext"] = None,
- conda_file: Optional[str] = None,
- image: Optional[str] = None,
- inference_config: Optional["_models.InferenceContainerProperties"] = None,
- os_type: Optional[Union[str, "_models.OperatingSystemType"]] = None,
+ intellectual_property: Optional["_models.IntellectualProperty"] = None,
stage: Optional[str] = None,
**kwargs: Any
) -> None:
@@ -9032,2848 +10344,7673 @@ def __init__(
:paramtype properties: dict[str, str]
:keyword tags: Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
- :keyword is_anonymous: If the name version are system generated (anonymous registration).
+ :keyword auto_delete_setting: Specifies the lifecycle setting of managed data asset.
+ :paramtype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :keyword is_anonymous: If the name and version are system generated (anonymous registration). For
+ types where Stage is defined, when Stage is provided it will be used to populate IsAnonymous.
:paramtype is_anonymous: bool
- :keyword is_archived: Is the asset archived?.
+ :keyword is_archived: Is the asset archived? For types where Stage is defined, when Stage is
+ provided it will be used to populate IsArchived.
:paramtype is_archived: bool
- :keyword auto_rebuild: Defines if image needs to be rebuilt based on base image changes. Known
- values are: "Disabled" and "OnBaseImageUpdate".
- :paramtype auto_rebuild: str or ~azure.mgmt.machinelearningservices.models.AutoRebuildSetting
- :keyword build: Configuration settings for Docker build context.
- :paramtype build: ~azure.mgmt.machinelearningservices.models.BuildContext
- :keyword conda_file: Standard configuration file used by Conda that lets you install any kind
- of package, including Python, R, and C/C++ packages.
-
-
- .. raw:: html
-
- .
- :paramtype conda_file: str
- :keyword image: Name of the image that will be used for the environment.
-
-
- .. raw:: html
-
- .
- :paramtype image: str
- :keyword inference_config: Defines configuration specific to inference.
- :paramtype inference_config:
- ~azure.mgmt.machinelearningservices.models.InferenceContainerProperties
- :keyword os_type: The OS type of the environment. Known values are: "Linux" and "Windows".
- :paramtype os_type: str or ~azure.mgmt.machinelearningservices.models.OperatingSystemType
- :keyword stage: Stage in the environment lifecycle assigned to this environment.
+ :keyword data_uri: [Required] Uri of the data. Example:
+ https://go.microsoft.com/fwlink/?linkid=2202330. Required.
+ :paramtype data_uri: str
+ :keyword intellectual_property: Intellectual Property details. Used if data is an Intellectual
+ Property.
+ :paramtype intellectual_property:
+ ~azure.mgmt.machinelearningservices.models.IntellectualProperty
+ :keyword stage: Stage in the data lifecycle assigned to this data asset.
:paramtype stage: str
"""
super().__init__(
description=description,
properties=properties,
tags=tags,
+ auto_delete_setting=auto_delete_setting,
is_anonymous=is_anonymous,
is_archived=is_archived,
**kwargs
)
- self.auto_rebuild = auto_rebuild
- self.build = build
- self.conda_file = conda_file
- self.environment_type = None
- self.image = image
- self.inference_config = inference_config
- self.os_type = os_type
- self.provisioning_state = None
+ self.data_type: Optional[str] = None
+ self.data_uri = data_uri
+ self.intellectual_property = intellectual_property
self.stage = stage
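Callers are expected to use one of the subtypes registered in _subtype_map rather than this base class; below is a sketch using UriFileDataVersion, which is assumed to accept the same base keyword arguments (its definition is outside this hunk).

from azure.mgmt.machinelearningservices.models import UriFileDataVersion

data_version = UriFileDataVersion(
    data_uri="https://go.microsoft.com/fwlink/?linkid=2202330",  # example URI from the docstring
    description="Sample uri_file data asset",
    stage="Development",  # illustrative lifecycle stage
)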
-class EnvironmentVersionResourceArmPaginatedResult(_serialization.Model):
- """A paginated list of EnvironmentVersion entities.
+class DataImport(DataVersionBaseProperties): # pylint: disable=too-many-instance-attributes
+ """DataImport.
- :ivar next_link: The link to the next page of EnvironmentVersion objects. If null, there are no
- additional pages.
- :vartype next_link: str
- :ivar value: An array of objects of type EnvironmentVersion.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.EnvironmentVersion]
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar auto_delete_setting: Specifies the lifecycle setting of managed data asset.
+ :vartype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :ivar is_anonymous: If the name and version are system generated (anonymous registration). For
+ types where Stage is defined, when Stage is provided it will be used to populate IsAnonymous.
+ :vartype is_anonymous: bool
+ :ivar is_archived: Is the asset archived? For types where Stage is defined, when Stage is
+ provided it will be used to populate IsArchived.
+ :vartype is_archived: bool
+ :ivar data_type: [Required] Specifies the type of data. Required. Known values are: "uri_file",
+ "uri_folder", and "mltable".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.DataType
+ :ivar data_uri: [Required] Uri of the data. Example:
+ https://go.microsoft.com/fwlink/?linkid=2202330. Required.
+ :vartype data_uri: str
+ :ivar intellectual_property: Intellectual Property details. Used if data is an Intellectual
+ Property.
+ :vartype intellectual_property: ~azure.mgmt.machinelearningservices.models.IntellectualProperty
+ :ivar stage: Stage in the data lifecycle assigned to this data asset.
+ :vartype stage: str
+ :ivar asset_name: Name of the asset for data import job to create.
+ :vartype asset_name: str
+ :ivar source: Source data of the asset to import from.
+ :vartype source: ~azure.mgmt.machinelearningservices.models.DataImportSource
"""
+ _validation = {
+ "data_type": {"required": True},
+ "data_uri": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
_attribute_map = {
- "next_link": {"key": "nextLink", "type": "str"},
- "value": {"key": "value", "type": "[EnvironmentVersion]"},
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "auto_delete_setting": {"key": "autoDeleteSetting", "type": "AutoDeleteSetting"},
+ "is_anonymous": {"key": "isAnonymous", "type": "bool"},
+ "is_archived": {"key": "isArchived", "type": "bool"},
+ "data_type": {"key": "dataType", "type": "str"},
+ "data_uri": {"key": "dataUri", "type": "str"},
+ "intellectual_property": {"key": "intellectualProperty", "type": "IntellectualProperty"},
+ "stage": {"key": "stage", "type": "str"},
+ "asset_name": {"key": "assetName", "type": "str"},
+ "source": {"key": "source", "type": "DataImportSource"},
}
def __init__(
self,
*,
- next_link: Optional[str] = None,
- value: Optional[List["_models.EnvironmentVersion"]] = None,
- **kwargs: Any
+ data_uri: str,
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ auto_delete_setting: Optional["_models.AutoDeleteSetting"] = None,
+ is_anonymous: bool = False,
+ is_archived: bool = False,
+ intellectual_property: Optional["_models.IntellectualProperty"] = None,
+ stage: Optional[str] = None,
+ asset_name: Optional[str] = None,
+ source: Optional["_models.DataImportSource"] = None,
+ **kwargs: Any
) -> None:
"""
- :keyword next_link: The link to the next page of EnvironmentVersion objects. If null, there are
- no additional pages.
- :paramtype next_link: str
- :keyword value: An array of objects of type EnvironmentVersion.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.EnvironmentVersion]
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword auto_delete_setting: Specifies the lifecycle setting of managed data asset.
+ :paramtype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :keyword is_anonymous: If the name and version are system generated (anonymous registration). For
+ types where Stage is defined, when Stage is provided it will be used to populate IsAnonymous.
+ :paramtype is_anonymous: bool
+ :keyword is_archived: Is the asset archived? For types where Stage is defined, when Stage is
+ provided it will be used to populate IsArchived.
+ :paramtype is_archived: bool
+ :keyword data_uri: [Required] Uri of the data. Example:
+ https://go.microsoft.com/fwlink/?linkid=2202330. Required.
+ :paramtype data_uri: str
+ :keyword intellectual_property: Intellectual Property details. Used if data is an Intellectual
+ Property.
+ :paramtype intellectual_property:
+ ~azure.mgmt.machinelearningservices.models.IntellectualProperty
+ :keyword stage: Stage in the data lifecycle assigned to this data asset.
+ :paramtype stage: str
+ :keyword asset_name: Name of the asset for data import job to create.
+ :paramtype asset_name: str
+ :keyword source: Source data of the asset to import from.
+ :paramtype source: ~azure.mgmt.machinelearningservices.models.DataImportSource
"""
- super().__init__(**kwargs)
- self.next_link = next_link
- self.value = value
-
+ super().__init__(
+ description=description,
+ properties=properties,
+ tags=tags,
+ auto_delete_setting=auto_delete_setting,
+ is_anonymous=is_anonymous,
+ is_archived=is_archived,
+ data_uri=data_uri,
+ intellectual_property=intellectual_property,
+ stage=stage,
+ **kwargs
+ )
+ self.data_type: str = "uri_folder"
+ self.asset_name = asset_name
+ self.source = source
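A sketch of the DataImport payload above; import_source stands in for a concrete models.DataImportSource configured elsewhere, since that type is outside this hunk, and the target data_uri is a placeholder.

from azure.mgmt.machinelearningservices.models import DataImport

data_import = DataImport(
    data_uri="<target-data-uri>",    # placeholder; must satisfy the pattern in _validation
    asset_name="imported-orders",    # name of the asset the import job creates
    source=import_source,            # assumed DataImportSource instance built elsewhere
)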
-class ErrorAdditionalInfo(_serialization.Model):
- """The resource management error additional info.
- Variables are only populated by the server, and will be ignored when sending a request.
+class DataLakeAnalyticsSchema(_serialization.Model):
+ """DataLakeAnalyticsSchema.
- :ivar type: The additional info type.
- :vartype type: str
- :ivar info: The additional info.
- :vartype info: JSON
+ :ivar properties:
+ :vartype properties:
+ ~azure.mgmt.machinelearningservices.models.DataLakeAnalyticsSchemaProperties
"""
- _validation = {
- "type": {"readonly": True},
- "info": {"readonly": True},
- }
-
_attribute_map = {
- "type": {"key": "type", "type": "str"},
- "info": {"key": "info", "type": "object"},
+ "properties": {"key": "properties", "type": "DataLakeAnalyticsSchemaProperties"},
}
- def __init__(self, **kwargs: Any) -> None:
- """ """
+ def __init__(
+ self, *, properties: Optional["_models.DataLakeAnalyticsSchemaProperties"] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword properties:
+ :paramtype properties:
+ ~azure.mgmt.machinelearningservices.models.DataLakeAnalyticsSchemaProperties
+ """
super().__init__(**kwargs)
- self.type = None
- self.info = None
+ self.properties = properties
-class ErrorDetail(_serialization.Model):
- """The error detail.
+class DataLakeAnalytics(Compute, DataLakeAnalyticsSchema): # pylint: disable=too-many-instance-attributes
+ """A DataLakeAnalytics compute.
Variables are only populated by the server, and will be ignored when sending a request.
- :ivar code: The error code.
- :vartype code: str
- :ivar message: The error message.
- :vartype message: str
- :ivar target: The error target.
- :vartype target: str
- :ivar details: The error details.
- :vartype details: list[~azure.mgmt.machinelearningservices.models.ErrorDetail]
- :ivar additional_info: The error additional info.
- :vartype additional_info: list[~azure.mgmt.machinelearningservices.models.ErrorAdditionalInfo]
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar properties:
+ :vartype properties:
+ ~azure.mgmt.machinelearningservices.models.DataLakeAnalyticsSchemaProperties
+ :ivar compute_type: The type of compute. Required. Known values are: "AKS", "Kubernetes",
+ "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight", "Databricks",
+ "DataLakeAnalytics", and "SynapseSpark".
+ :vartype compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
+ :ivar compute_location: Location for the underlying compute.
+ :vartype compute_location: str
+ :ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
+ Updating, Provisioning, Succeeded, and Failed. Known values are: "Unknown", "Updating",
+ "Creating", "Deleting", "Succeeded", "Failed", and "Canceled".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.ProvisioningState
+ :ivar description: The description of the Machine Learning compute.
+ :vartype description: str
+ :ivar created_on: The time at which the compute was created.
+ :vartype created_on: ~datetime.datetime
+ :ivar modified_on: The time at which the compute was last modified.
+ :vartype modified_on: ~datetime.datetime
+ :ivar resource_id: ARM resource id of the underlying compute.
+ :vartype resource_id: str
+ :ivar provisioning_errors: Errors during provisioning.
+ :vartype provisioning_errors: list[~azure.mgmt.machinelearningservices.models.ErrorResponse]
+ :ivar is_attached_compute: Indicating whether the compute was provisioned by user and brought
+ from outside if true, or machine learning service provisioned it if false.
+ :vartype is_attached_compute: bool
+ :ivar disable_local_auth: Opt-out of local authentication and ensure customers can use only MSI
+ and AAD exclusively for authentication.
+ :vartype disable_local_auth: bool
"""
_validation = {
- "code": {"readonly": True},
- "message": {"readonly": True},
- "target": {"readonly": True},
- "details": {"readonly": True},
- "additional_info": {"readonly": True},
+ "compute_type": {"required": True},
+ "provisioning_state": {"readonly": True},
+ "created_on": {"readonly": True},
+ "modified_on": {"readonly": True},
+ "provisioning_errors": {"readonly": True},
+ "is_attached_compute": {"readonly": True},
}
_attribute_map = {
- "code": {"key": "code", "type": "str"},
- "message": {"key": "message", "type": "str"},
- "target": {"key": "target", "type": "str"},
- "details": {"key": "details", "type": "[ErrorDetail]"},
- "additional_info": {"key": "additionalInfo", "type": "[ErrorAdditionalInfo]"},
+ "properties": {"key": "properties", "type": "DataLakeAnalyticsSchemaProperties"},
+ "compute_type": {"key": "computeType", "type": "str"},
+ "compute_location": {"key": "computeLocation", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "description": {"key": "description", "type": "str"},
+ "created_on": {"key": "createdOn", "type": "iso-8601"},
+ "modified_on": {"key": "modifiedOn", "type": "iso-8601"},
+ "resource_id": {"key": "resourceId", "type": "str"},
+ "provisioning_errors": {"key": "provisioningErrors", "type": "[ErrorResponse]"},
+ "is_attached_compute": {"key": "isAttachedCompute", "type": "bool"},
+ "disable_local_auth": {"key": "disableLocalAuth", "type": "bool"},
}
- def __init__(self, **kwargs: Any) -> None:
- """ """
- super().__init__(**kwargs)
- self.code = None
- self.message = None
- self.target = None
- self.details = None
- self.additional_info = None
+ def __init__(
+ self,
+ *,
+ properties: Optional["_models.DataLakeAnalyticsSchemaProperties"] = None,
+ compute_location: Optional[str] = None,
+ description: Optional[str] = None,
+ resource_id: Optional[str] = None,
+ disable_local_auth: Optional[bool] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword properties:
+ :paramtype properties:
+ ~azure.mgmt.machinelearningservices.models.DataLakeAnalyticsSchemaProperties
+ :keyword compute_location: Location for the underlying compute.
+ :paramtype compute_location: str
+ :keyword description: The description of the Machine Learning compute.
+ :paramtype description: str
+ :keyword resource_id: ARM resource id of the underlying compute.
+ :paramtype resource_id: str
+ :keyword disable_local_auth: Opt-out of local authentication and ensure customers can use only
+ MSI and AAD exclusively for authentication.
+ :paramtype disable_local_auth: bool
+ """
+ super().__init__(
+ compute_location=compute_location,
+ description=description,
+ resource_id=resource_id,
+ disable_local_auth=disable_local_auth,
+ properties=properties,
+ **kwargs
+ )
+ self.properties = properties
+ self.compute_type: str = "DataLakeAnalytics"
+ self.compute_location = compute_location
+ self.provisioning_state = None
+ self.description = description
+ self.created_on = None
+ self.modified_on = None
+ self.resource_id = resource_id
+ self.provisioning_errors = None
+ self.is_attached_compute = None
+ self.disable_local_auth = disable_local_auth
-class ErrorResponse(_serialization.Model):
- """Common error response for all Azure Resource Manager APIs to return error details for failed
- operations. (This also follows the OData error response format.).
+class DataLakeAnalyticsSchemaProperties(_serialization.Model):
+ """DataLakeAnalyticsSchemaProperties.
- :ivar error: The error object.
- :vartype error: ~azure.mgmt.machinelearningservices.models.ErrorDetail
+ :ivar data_lake_store_account_name: DataLake Store Account Name.
+ :vartype data_lake_store_account_name: str
"""
_attribute_map = {
- "error": {"key": "error", "type": "ErrorDetail"},
+ "data_lake_store_account_name": {"key": "dataLakeStoreAccountName", "type": "str"},
}
- def __init__(self, *, error: Optional["_models.ErrorDetail"] = None, **kwargs: Any) -> None:
+ def __init__(self, *, data_lake_store_account_name: Optional[str] = None, **kwargs: Any) -> None:
"""
- :keyword error: The error object.
- :paramtype error: ~azure.mgmt.machinelearningservices.models.ErrorDetail
+ :keyword data_lake_store_account_name: DataLake Store Account Name.
+ :paramtype data_lake_store_account_name: str
"""
super().__init__(**kwargs)
- self.error = error
+ self.data_lake_store_account_name = data_lake_store_account_name
-class EstimatedVMPrice(_serialization.Model):
- """The estimated price info for using a VM of a particular OS type, tier, etc.
+class DataPathAssetReference(AssetReferenceBase):
+ """Reference to an asset via its path in a datastore.
All required parameters must be populated in order to send to Azure.
- :ivar retail_price: The price charged for using the VM. Required.
- :vartype retail_price: float
- :ivar os_type: Operating system type used by the VM. Required. Known values are: "Linux" and
- "Windows".
- :vartype os_type: str or ~azure.mgmt.machinelearningservices.models.VMPriceOSType
- :ivar vm_tier: The type of the VM. Required. Known values are: "Standard", "LowPriority", and
- "Spot".
- :vartype vm_tier: str or ~azure.mgmt.machinelearningservices.models.VMTier
+ :ivar reference_type: [Required] Specifies the type of asset reference. Required. Known values
+ are: "Id", "DataPath", and "OutputPath".
+ :vartype reference_type: str or ~azure.mgmt.machinelearningservices.models.ReferenceType
+ :ivar datastore_id: ARM resource ID of the datastore where the asset is located.
+ :vartype datastore_id: str
+ :ivar path: The path of the file/directory in the datastore.
+ :vartype path: str
"""
_validation = {
- "retail_price": {"required": True},
- "os_type": {"required": True},
- "vm_tier": {"required": True},
+ "reference_type": {"required": True},
}
_attribute_map = {
- "retail_price": {"key": "retailPrice", "type": "float"},
- "os_type": {"key": "osType", "type": "str"},
- "vm_tier": {"key": "vmTier", "type": "str"},
+ "reference_type": {"key": "referenceType", "type": "str"},
+ "datastore_id": {"key": "datastoreId", "type": "str"},
+ "path": {"key": "path", "type": "str"},
}
- def __init__(
- self,
- *,
- retail_price: float,
- os_type: Union[str, "_models.VMPriceOSType"],
- vm_tier: Union[str, "_models.VMTier"],
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, datastore_id: Optional[str] = None, path: Optional[str] = None, **kwargs: Any) -> None:
"""
- :keyword retail_price: The price charged for using the VM. Required.
- :paramtype retail_price: float
- :keyword os_type: Operating system type used by the VM. Required. Known values are: "Linux" and
- "Windows".
- :paramtype os_type: str or ~azure.mgmt.machinelearningservices.models.VMPriceOSType
- :keyword vm_tier: The type of the VM. Required. Known values are: "Standard", "LowPriority",
- and "Spot".
- :paramtype vm_tier: str or ~azure.mgmt.machinelearningservices.models.VMTier
+ :keyword datastore_id: ARM resource ID of the datastore where the asset is located.
+ :paramtype datastore_id: str
+ :keyword path: The path of the file/directory in the datastore.
+ :paramtype path: str
"""
super().__init__(**kwargs)
- self.retail_price = retail_price
- self.os_type = os_type
- self.vm_tier = vm_tier
+ self.reference_type: str = "DataPath"
+ self.datastore_id = datastore_id
+ self.path = path
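For orientation, a minimal construction sketch for the model above, using only the constructor shown in this diff; the datastore ID and path values are illustrative placeholders.

from azure.mgmt.machinelearningservices import models

# Point at an asset stored inside a registered datastore; the constructor
# pins the reference_type discriminator to "DataPath".
ref = models.DataPathAssetReference(
    datastore_id=(
        "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg"
        "/providers/Microsoft.MachineLearningServices/workspaces/my-ws"
        "/datastores/workspaceblobstore"
    ),
    path="models/sklearn/model.pkl",
)
assert ref.reference_type == "DataPath"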
-class EstimatedVMPrices(_serialization.Model):
- """The estimated price info for using a VM.
+class DataQualityMonitoringSignal(MonitoringSignalBase):
+ """DataQualityMonitoringSignal.
All required parameters must be populated in order to send to Azure.
- :ivar billing_currency: Three lettered code specifying the currency of the VM price. Example:
- USD. Required. "USD"
- :vartype billing_currency: str or ~azure.mgmt.machinelearningservices.models.BillingCurrency
- :ivar unit_of_measure: The unit of time measurement for the specified VM price. Example:
- OneHour. Required. "OneHour"
- :vartype unit_of_measure: str or ~azure.mgmt.machinelearningservices.models.UnitOfMeasure
- :ivar values: The list of estimated prices for using a VM of a particular OS type, tier, etc.
- Required.
- :vartype values: list[~azure.mgmt.machinelearningservices.models.EstimatedVMPrice]
+ :ivar notification_types: The current notification mode for this signal.
+ :vartype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
+ :vartype properties: dict[str, str]
+ :ivar signal_type: [Required] Specifies the type of signal to monitor. Required. Known values
+ are: "DataDrift", "PredictionDrift", "DataQuality", "FeatureAttributionDrift", "Custom",
+ "ModelPerformance", "GenerationSafetyQuality", and "GenerationTokenStatistics".
+ :vartype signal_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringSignalType
+ :ivar feature_data_type_override: A dictionary that maps feature names to their respective data
+ types.
+ :vartype feature_data_type_override: dict[str, str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType]
+ :ivar feature_importance_settings: The settings for computing feature importance.
+ :vartype feature_importance_settings:
+ ~azure.mgmt.machinelearningservices.models.FeatureImportanceSettings
+ :ivar features: The features to calculate drift over.
+ :vartype features: ~azure.mgmt.machinelearningservices.models.MonitoringFeatureFilterBase
+ :ivar metric_thresholds: [Required] A list of metrics to calculate and their associated
+ thresholds. Required.
+ :vartype metric_thresholds:
+ list[~azure.mgmt.machinelearningservices.models.DataQualityMetricThresholdBase]
+ :ivar production_data: [Required] The data produced by the production service for which drift
+ will be calculated. Required.
+ :vartype production_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
+ :ivar reference_data: [Required] The data to calculate drift against. Required.
+ :vartype reference_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
"""
_validation = {
- "billing_currency": {"required": True},
- "unit_of_measure": {"required": True},
- "values": {"required": True},
+ "signal_type": {"required": True},
+ "metric_thresholds": {"required": True},
+ "production_data": {"required": True},
+ "reference_data": {"required": True},
}
_attribute_map = {
- "billing_currency": {"key": "billingCurrency", "type": "str"},
- "unit_of_measure": {"key": "unitOfMeasure", "type": "str"},
- "values": {"key": "values", "type": "[EstimatedVMPrice]"},
+ "notification_types": {"key": "notificationTypes", "type": "[str]"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "signal_type": {"key": "signalType", "type": "str"},
+ "feature_data_type_override": {"key": "featureDataTypeOverride", "type": "{str}"},
+ "feature_importance_settings": {"key": "featureImportanceSettings", "type": "FeatureImportanceSettings"},
+ "features": {"key": "features", "type": "MonitoringFeatureFilterBase"},
+ "metric_thresholds": {"key": "metricThresholds", "type": "[DataQualityMetricThresholdBase]"},
+ "production_data": {"key": "productionData", "type": "MonitoringInputDataBase"},
+ "reference_data": {"key": "referenceData", "type": "MonitoringInputDataBase"},
}
def __init__(
self,
*,
- billing_currency: Union[str, "_models.BillingCurrency"],
- unit_of_measure: Union[str, "_models.UnitOfMeasure"],
- values: List["_models.EstimatedVMPrice"],
+ metric_thresholds: List["_models.DataQualityMetricThresholdBase"],
+ production_data: "_models.MonitoringInputDataBase",
+ reference_data: "_models.MonitoringInputDataBase",
+ notification_types: Optional[List[Union[str, "_models.MonitoringNotificationType"]]] = None,
+ properties: Optional[Dict[str, str]] = None,
+ feature_data_type_override: Optional[Dict[str, Union[str, "_models.MonitoringFeatureDataType"]]] = None,
+ feature_importance_settings: Optional["_models.FeatureImportanceSettings"] = None,
+ features: Optional["_models.MonitoringFeatureFilterBase"] = None,
**kwargs: Any
) -> None:
"""
- :keyword billing_currency: Three lettered code specifying the currency of the VM price.
- Example: USD. Required. "USD"
- :paramtype billing_currency: str or ~azure.mgmt.machinelearningservices.models.BillingCurrency
- :keyword unit_of_measure: The unit of time measurement for the specified VM price. Example:
- OneHour. Required. "OneHour"
- :paramtype unit_of_measure: str or ~azure.mgmt.machinelearningservices.models.UnitOfMeasure
- :keyword values: The list of estimated prices for using a VM of a particular OS type, tier,
- etc. Required.
- :paramtype values: list[~azure.mgmt.machinelearningservices.models.EstimatedVMPrice]
- """
- super().__init__(**kwargs)
- self.billing_currency = billing_currency
- self.unit_of_measure = unit_of_measure
- self.values = values
+ :keyword notification_types: The current notification mode for this signal.
+ :paramtype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
+ :paramtype properties: dict[str, str]
+ :keyword feature_data_type_override: A dictionary that maps feature names to their respective
+ data types.
+ :paramtype feature_data_type_override: dict[str, str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType]
+ :keyword feature_importance_settings: The settings for computing feature importance.
+ :paramtype feature_importance_settings:
+ ~azure.mgmt.machinelearningservices.models.FeatureImportanceSettings
+ :keyword features: The features to calculate drift over.
+ :paramtype features: ~azure.mgmt.machinelearningservices.models.MonitoringFeatureFilterBase
+ :keyword metric_thresholds: [Required] A list of metrics to calculate and their associated
+ thresholds. Required.
+ :paramtype metric_thresholds:
+ list[~azure.mgmt.machinelearningservices.models.DataQualityMetricThresholdBase]
+ :keyword production_data: [Required] The data produced by the production service for which
+ drift will be calculated. Required.
+ :paramtype production_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
+ :keyword reference_data: [Required] The data to calculate drift against. Required.
+ :paramtype reference_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
+ """
+ super().__init__(notification_types=notification_types, properties=properties, **kwargs)
+ self.signal_type: str = "DataQuality"
+ self.feature_data_type_override = feature_data_type_override
+ self.feature_importance_settings = feature_importance_settings
+ self.features = features
+ self.metric_thresholds = metric_thresholds
+ self.production_data = production_data
+ self.reference_data = reference_data
+
+
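A hedged construction sketch for the signal above: numeric_threshold, production_input, and reference_input stand in for instances of the concrete DataQualityMetricThresholdBase and MonitoringInputDataBase subclasses, whose constructors are not part of this diff, and the feature-type strings are illustrative.

from azure.mgmt.machinelearningservices import models

# numeric_threshold, production_input and reference_input are placeholders for
# objects built from the concrete threshold / input-data subclasses (not shown here).
signal = models.DataQualityMonitoringSignal(
    metric_thresholds=[numeric_threshold],
    production_data=production_input,
    reference_data=reference_input,
    feature_data_type_override={"age": "Numerical", "payment_type": "Categorical"},
)
assert signal.signal_type == "DataQuality"  # discriminator fixed by the constructor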
+class DatasetExportSummary(ExportSummary):
+ """DatasetExportSummary.
+ Variables are only populated by the server, and will be ignored when sending a request.
-class ExternalFQDNResponse(_serialization.Model):
- """ExternalFQDNResponse.
+ All required parameters must be populated in order to send to Azure.
- :ivar value:
- :vartype value: list[~azure.mgmt.machinelearningservices.models.FQDNEndpoints]
+ :ivar end_date_time: The time when the export was completed.
+ :vartype end_date_time: ~datetime.datetime
+ :ivar exported_row_count: The total number of labeled datapoints exported.
+ :vartype exported_row_count: int
+ :ivar format: [Required] The format of exported labels; also serves as the discriminator. Required.
+ Known values are: "Dataset", "Coco", and "CSV".
+ :vartype format: str or ~azure.mgmt.machinelearningservices.models.ExportFormatType
+ :ivar labeling_job_id: Name and identifier of the job containing exported labels.
+ :vartype labeling_job_id: str
+ :ivar start_date_time: The time when the export was requested.
+ :vartype start_date_time: ~datetime.datetime
+ :ivar labeled_asset_name: The unique name of the labeled data asset.
+ :vartype labeled_asset_name: str
"""
+ _validation = {
+ "end_date_time": {"readonly": True},
+ "exported_row_count": {"readonly": True},
+ "format": {"required": True},
+ "labeling_job_id": {"readonly": True},
+ "start_date_time": {"readonly": True},
+ "labeled_asset_name": {"readonly": True},
+ }
+
_attribute_map = {
- "value": {"key": "value", "type": "[FQDNEndpoints]"},
+ "end_date_time": {"key": "endDateTime", "type": "iso-8601"},
+ "exported_row_count": {"key": "exportedRowCount", "type": "int"},
+ "format": {"key": "format", "type": "str"},
+ "labeling_job_id": {"key": "labelingJobId", "type": "str"},
+ "start_date_time": {"key": "startDateTime", "type": "iso-8601"},
+ "labeled_asset_name": {"key": "labeledAssetName", "type": "str"},
}
- def __init__(self, *, value: Optional[List["_models.FQDNEndpoints"]] = None, **kwargs: Any) -> None:
- """
- :keyword value:
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.FQDNEndpoints]
- """
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
super().__init__(**kwargs)
- self.value = value
+ self.format: str = "Dataset"
+ self.labeled_asset_name = None
-class FeaturizationSettings(_serialization.Model):
- """Featurization Configuration.
+class Datastore(ProxyResource):
+ """Azure Resource Manager resource envelope.
- :ivar dataset_language: Dataset language, useful for the text data.
- :vartype dataset_language: str
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.DatastoreProperties
"""
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "properties": {"required": True},
+ }
+
_attribute_map = {
- "dataset_language": {"key": "datasetLanguage", "type": "str"},
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "properties": {"key": "properties", "type": "DatastoreProperties"},
}
- def __init__(self, *, dataset_language: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(self, *, properties: "_models.DatastoreProperties", **kwargs: Any) -> None:
"""
- :keyword dataset_language: Dataset language, useful for the text data.
- :paramtype dataset_language: str
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.DatastoreProperties
"""
super().__init__(**kwargs)
- self.dataset_language = dataset_language
+ self.properties = properties
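A sketch of how this envelope is typically populated, assuming blob_properties is a concrete DatastoreProperties subclass instance (for example an Azure Blob datastore definition) built elsewhere; that subclass is not part of this diff.

from azure.mgmt.machinelearningservices import models

# blob_properties is an assumed, previously built DatastoreProperties subclass.
datastore = models.Datastore(properties=blob_properties)

# Only `properties` is settable from the client side; id, name, type and
# system_data are read-only and filled in by the service on responses.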
-class FlavorData(_serialization.Model):
- """FlavorData.
+class DatastoreResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of Datastore entities.
- :ivar data: Model flavor-specific data.
- :vartype data: dict[str, str]
+ :ivar next_link: The link to the next page of Datastore objects. If null, there are no
+ additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type Datastore.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.Datastore]
"""
_attribute_map = {
- "data": {"key": "data", "type": "{str}"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[Datastore]"},
}
- def __init__(self, *, data: Optional[Dict[str, str]] = None, **kwargs: Any) -> None:
+ def __init__(
+ self, *, next_link: Optional[str] = None, value: Optional[List["_models.Datastore"]] = None, **kwargs: Any
+ ) -> None:
"""
- :keyword data: Model flavor-specific data.
- :paramtype data: dict[str, str]
+ :keyword next_link: The link to the next page of Datastore objects. If null, there are no
+ additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type Datastore.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.Datastore]
"""
super().__init__(**kwargs)
- self.data = data
+ self.next_link = next_link
+ self.value = value
-class Forecasting(TableVertical, AutoMLVertical): # pylint: disable=too-many-instance-attributes
- """Forecasting task in AutoML Table vertical.
+class DataVersionBase(ProxyResource):
+ """Azure Resource Manager resource envelope.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
- :ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
- "Warning", "Error", and "Critical".
- :vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
- :ivar target_column_name: Target column name: This is prediction values column.
- Also known as label column name in context of classification tasks.
- :vartype target_column_name: str
- :ivar task_type: [Required] Task type for AutoMLJob. Required. Known values are:
- "Classification", "Regression", "Forecasting", "ImageClassification",
- "ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
- "TextClassification", "TextClassificationMultilabel", and "TextNER".
- :vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
- :ivar training_data: [Required] Training data input. Required.
- :vartype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar cv_split_column_names: Columns to use for CVSplit data.
- :vartype cv_split_column_names: list[str]
- :ivar featurization_settings: Featurization inputs needed for AutoML job.
- :vartype featurization_settings:
- ~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
- :ivar limit_settings: Execution constraints for AutoMLJob.
- :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
- :ivar n_cross_validations: Number of cross validation folds to be applied on training dataset
- when validation dataset is not provided.
- :vartype n_cross_validations: ~azure.mgmt.machinelearningservices.models.NCrossValidations
- :ivar test_data: Test data input.
- :vartype test_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar test_data_size: The fraction of test dataset that needs to be set aside for validation
- purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :vartype test_data_size: float
- :ivar validation_data: Validation data inputs.
- :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :vartype validation_data_size: float
- :ivar weight_column_name: The name of the sample weight column. Automated ML supports a
- weighted column as an input, causing rows in the data to be weighted up or down.
- :vartype weight_column_name: str
- :ivar forecasting_settings: Forecasting task specific inputs.
- :vartype forecasting_settings: ~azure.mgmt.machinelearningservices.models.ForecastingSettings
- :ivar primary_metric: Primary metric for forecasting task. Known values are:
- "SpearmanCorrelation", "NormalizedRootMeanSquaredError", "R2Score", and
- "NormalizedMeanAbsoluteError".
- :vartype primary_metric: str or
- ~azure.mgmt.machinelearningservices.models.ForecastingPrimaryMetrics
- :ivar training_settings: Inputs for training phase for an AutoML Job.
- :vartype training_settings:
- ~azure.mgmt.machinelearningservices.models.ForecastingTrainingSettings
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.DataVersionBaseProperties
"""
_validation = {
- "task_type": {"required": True},
- "training_data": {"required": True},
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "properties": {"required": True},
}
_attribute_map = {
- "log_verbosity": {"key": "logVerbosity", "type": "str"},
- "target_column_name": {"key": "targetColumnName", "type": "str"},
- "task_type": {"key": "taskType", "type": "str"},
- "training_data": {"key": "trainingData", "type": "MLTableJobInput"},
- "cv_split_column_names": {"key": "cvSplitColumnNames", "type": "[str]"},
- "featurization_settings": {"key": "featurizationSettings", "type": "TableVerticalFeaturizationSettings"},
- "limit_settings": {"key": "limitSettings", "type": "TableVerticalLimitSettings"},
- "n_cross_validations": {"key": "nCrossValidations", "type": "NCrossValidations"},
- "test_data": {"key": "testData", "type": "MLTableJobInput"},
- "test_data_size": {"key": "testDataSize", "type": "float"},
- "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
- "validation_data_size": {"key": "validationDataSize", "type": "float"},
- "weight_column_name": {"key": "weightColumnName", "type": "str"},
- "forecasting_settings": {"key": "forecastingSettings", "type": "ForecastingSettings"},
- "primary_metric": {"key": "primaryMetric", "type": "str"},
- "training_settings": {"key": "trainingSettings", "type": "ForecastingTrainingSettings"},
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "properties": {"key": "properties", "type": "DataVersionBaseProperties"},
}
- def __init__(
- self,
- *,
- training_data: "_models.MLTableJobInput",
- log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
- target_column_name: Optional[str] = None,
- cv_split_column_names: Optional[List[str]] = None,
- featurization_settings: Optional["_models.TableVerticalFeaturizationSettings"] = None,
- limit_settings: Optional["_models.TableVerticalLimitSettings"] = None,
- n_cross_validations: Optional["_models.NCrossValidations"] = None,
- test_data: Optional["_models.MLTableJobInput"] = None,
- test_data_size: Optional[float] = None,
- validation_data: Optional["_models.MLTableJobInput"] = None,
- validation_data_size: Optional[float] = None,
- weight_column_name: Optional[str] = None,
- forecasting_settings: Optional["_models.ForecastingSettings"] = None,
- primary_metric: Optional[Union[str, "_models.ForecastingPrimaryMetrics"]] = None,
- training_settings: Optional["_models.ForecastingTrainingSettings"] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, properties: "_models.DataVersionBaseProperties", **kwargs: Any) -> None:
"""
- :keyword log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
- "Warning", "Error", and "Critical".
- :paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
- :keyword target_column_name: Target column name: This is prediction values column.
- Also known as label column name in context of classification tasks.
- :paramtype target_column_name: str
- :keyword training_data: [Required] Training data input. Required.
- :paramtype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword cv_split_column_names: Columns to use for CVSplit data.
- :paramtype cv_split_column_names: list[str]
- :keyword featurization_settings: Featurization inputs needed for AutoML job.
- :paramtype featurization_settings:
- ~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
- :keyword limit_settings: Execution constraints for AutoMLJob.
- :paramtype limit_settings:
- ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
- :keyword n_cross_validations: Number of cross validation folds to be applied on training
- dataset
- when validation dataset is not provided.
- :paramtype n_cross_validations: ~azure.mgmt.machinelearningservices.models.NCrossValidations
- :keyword test_data: Test data input.
- :paramtype test_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword test_data_size: The fraction of test dataset that needs to be set aside for validation
- purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :paramtype test_data_size: float
- :keyword validation_data: Validation data inputs.
- :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :paramtype validation_data_size: float
- :keyword weight_column_name: The name of the sample weight column. Automated ML supports a
- weighted column as an input, causing rows in the data to be weighted up or down.
- :paramtype weight_column_name: str
- :keyword forecasting_settings: Forecasting task specific inputs.
- :paramtype forecasting_settings: ~azure.mgmt.machinelearningservices.models.ForecastingSettings
- :keyword primary_metric: Primary metric for forecasting task. Known values are:
- "SpearmanCorrelation", "NormalizedRootMeanSquaredError", "R2Score", and
- "NormalizedMeanAbsoluteError".
- :paramtype primary_metric: str or
- ~azure.mgmt.machinelearningservices.models.ForecastingPrimaryMetrics
- :keyword training_settings: Inputs for training phase for an AutoML Job.
- :paramtype training_settings:
- ~azure.mgmt.machinelearningservices.models.ForecastingTrainingSettings
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.DataVersionBaseProperties
"""
- super().__init__(
- cv_split_column_names=cv_split_column_names,
- featurization_settings=featurization_settings,
- limit_settings=limit_settings,
- n_cross_validations=n_cross_validations,
- test_data=test_data,
- test_data_size=test_data_size,
- validation_data=validation_data,
- validation_data_size=validation_data_size,
- weight_column_name=weight_column_name,
- log_verbosity=log_verbosity,
- target_column_name=target_column_name,
- training_data=training_data,
- **kwargs
- )
- self.log_verbosity = log_verbosity
- self.target_column_name = target_column_name
- self.task_type: str = "Forecasting"
- self.training_data = training_data
- self.forecasting_settings = forecasting_settings
- self.primary_metric = primary_metric
- self.training_settings = training_settings
- self.cv_split_column_names = cv_split_column_names
- self.featurization_settings = featurization_settings
- self.limit_settings = limit_settings
- self.n_cross_validations = n_cross_validations
- self.test_data = test_data
- self.test_data_size = test_data_size
- self.validation_data = validation_data
- self.validation_data_size = validation_data_size
- self.weight_column_name = weight_column_name
+ super().__init__(**kwargs)
+ self.properties = properties
-class ForecastingSettings(_serialization.Model): # pylint: disable=too-many-instance-attributes
- """Forecasting specific parameters.
+class DataVersionBaseResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of DataVersionBase entities.
- :ivar country_or_region_for_holidays: Country or region for holidays for forecasting tasks.
- These should be ISO 3166 two-letter country/region codes, for example 'US' or 'GB'.
- :vartype country_or_region_for_holidays: str
- :ivar cv_step_size: Number of periods between the origin time of one CV fold and the next fold.
- For
- example, if ``CVStepSize`` = 3 for daily data, the origin time for each fold will be
- three days apart.
- :vartype cv_step_size: int
- :ivar feature_lags: Flag for generating lags for the numeric features with 'auto' or null.
- Known values are: "None" and "Auto".
- :vartype feature_lags: str or ~azure.mgmt.machinelearningservices.models.FeatureLags
- :ivar forecast_horizon: The desired maximum forecast horizon in units of time-series frequency.
- :vartype forecast_horizon: ~azure.mgmt.machinelearningservices.models.ForecastHorizon
- :ivar frequency: When forecasting, this parameter represents the period with which the forecast
- is desired, for example daily, weekly, yearly, etc. The forecast frequency is dataset frequency
- by default.
- :vartype frequency: str
- :ivar seasonality: Set time series seasonality as an integer multiple of the series frequency.
- If seasonality is set to 'auto', it will be inferred.
- :vartype seasonality: ~azure.mgmt.machinelearningservices.models.Seasonality
- :ivar short_series_handling_config: The parameter defining how if AutoML should handle short
- time series. Known values are: "None", "Auto", "Pad", and "Drop".
- :vartype short_series_handling_config: str or
- ~azure.mgmt.machinelearningservices.models.ShortSeriesHandlingConfiguration
- :ivar target_aggregate_function: The function to be used to aggregate the time series target
- column to conform to a user specified frequency.
- If the TargetAggregateFunction is set i.e. not 'None', but the freq parameter is not set, the
- error is raised. The possible target aggregation functions are: "sum", "max", "min" and "mean".
- Known values are: "None", "Sum", "Max", "Min", and "Mean".
- :vartype target_aggregate_function: str or
- ~azure.mgmt.machinelearningservices.models.TargetAggregationFunction
- :ivar target_lags: The number of past periods to lag from the target column.
- :vartype target_lags: ~azure.mgmt.machinelearningservices.models.TargetLags
- :ivar target_rolling_window_size: The number of past periods used to create a rolling window
- average of the target column.
- :vartype target_rolling_window_size:
- ~azure.mgmt.machinelearningservices.models.TargetRollingWindowSize
- :ivar time_column_name: The name of the time column. This parameter is required when
- forecasting to specify the datetime column in the input data used for building the time series
- and inferring its frequency.
- :vartype time_column_name: str
- :ivar time_series_id_column_names: The names of columns used to group a timeseries. It can be
- used to create multiple series.
- If grain is not defined, the data set is assumed to be one time-series. This parameter is used
- with task type forecasting.
- :vartype time_series_id_column_names: list[str]
- :ivar use_stl: Configure STL Decomposition of the time-series target column. Known values are:
- "None", "Season", and "SeasonTrend".
- :vartype use_stl: str or ~azure.mgmt.machinelearningservices.models.UseStl
+ :ivar next_link: The link to the next page of DataVersionBase objects. If null, there are no
+ additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type DataVersionBase.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.DataVersionBase]
"""
_attribute_map = {
- "country_or_region_for_holidays": {"key": "countryOrRegionForHolidays", "type": "str"},
- "cv_step_size": {"key": "cvStepSize", "type": "int"},
- "feature_lags": {"key": "featureLags", "type": "str"},
- "forecast_horizon": {"key": "forecastHorizon", "type": "ForecastHorizon"},
- "frequency": {"key": "frequency", "type": "str"},
- "seasonality": {"key": "seasonality", "type": "Seasonality"},
- "short_series_handling_config": {"key": "shortSeriesHandlingConfig", "type": "str"},
- "target_aggregate_function": {"key": "targetAggregateFunction", "type": "str"},
- "target_lags": {"key": "targetLags", "type": "TargetLags"},
- "target_rolling_window_size": {"key": "targetRollingWindowSize", "type": "TargetRollingWindowSize"},
- "time_column_name": {"key": "timeColumnName", "type": "str"},
- "time_series_id_column_names": {"key": "timeSeriesIdColumnNames", "type": "[str]"},
- "use_stl": {"key": "useStl", "type": "str"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[DataVersionBase]"},
}
def __init__(
- self,
- *,
- country_or_region_for_holidays: Optional[str] = None,
- cv_step_size: Optional[int] = None,
- feature_lags: Optional[Union[str, "_models.FeatureLags"]] = None,
- forecast_horizon: Optional["_models.ForecastHorizon"] = None,
- frequency: Optional[str] = None,
- seasonality: Optional["_models.Seasonality"] = None,
- short_series_handling_config: Optional[Union[str, "_models.ShortSeriesHandlingConfiguration"]] = None,
- target_aggregate_function: Optional[Union[str, "_models.TargetAggregationFunction"]] = None,
- target_lags: Optional["_models.TargetLags"] = None,
- target_rolling_window_size: Optional["_models.TargetRollingWindowSize"] = None,
- time_column_name: Optional[str] = None,
- time_series_id_column_names: Optional[List[str]] = None,
- use_stl: Optional[Union[str, "_models.UseStl"]] = None,
- **kwargs: Any
+ self, *, next_link: Optional[str] = None, value: Optional[List["_models.DataVersionBase"]] = None, **kwargs: Any
) -> None:
"""
- :keyword country_or_region_for_holidays: Country or region for holidays for forecasting tasks.
- These should be ISO 3166 two-letter country/region codes, for example 'US' or 'GB'.
- :paramtype country_or_region_for_holidays: str
- :keyword cv_step_size: Number of periods between the origin time of one CV fold and the next
- fold. For
- example, if ``CVStepSize`` = 3 for daily data, the origin time for each fold will be
- three days apart.
- :paramtype cv_step_size: int
- :keyword feature_lags: Flag for generating lags for the numeric features with 'auto' or null.
- Known values are: "None" and "Auto".
- :paramtype feature_lags: str or ~azure.mgmt.machinelearningservices.models.FeatureLags
- :keyword forecast_horizon: The desired maximum forecast horizon in units of time-series
- frequency.
- :paramtype forecast_horizon: ~azure.mgmt.machinelearningservices.models.ForecastHorizon
- :keyword frequency: When forecasting, this parameter represents the period with which the
- forecast is desired, for example daily, weekly, yearly, etc. The forecast frequency is dataset
- frequency by default.
- :paramtype frequency: str
- :keyword seasonality: Set time series seasonality as an integer multiple of the series
- frequency.
- If seasonality is set to 'auto', it will be inferred.
- :paramtype seasonality: ~azure.mgmt.machinelearningservices.models.Seasonality
- :keyword short_series_handling_config: The parameter defining how if AutoML should handle short
- time series. Known values are: "None", "Auto", "Pad", and "Drop".
- :paramtype short_series_handling_config: str or
- ~azure.mgmt.machinelearningservices.models.ShortSeriesHandlingConfiguration
- :keyword target_aggregate_function: The function to be used to aggregate the time series target
- column to conform to a user specified frequency.
- If the TargetAggregateFunction is set i.e. not 'None', but the freq parameter is not set, the
- error is raised. The possible target aggregation functions are: "sum", "max", "min" and "mean".
- Known values are: "None", "Sum", "Max", "Min", and "Mean".
- :paramtype target_aggregate_function: str or
- ~azure.mgmt.machinelearningservices.models.TargetAggregationFunction
- :keyword target_lags: The number of past periods to lag from the target column.
- :paramtype target_lags: ~azure.mgmt.machinelearningservices.models.TargetLags
- :keyword target_rolling_window_size: The number of past periods used to create a rolling window
- average of the target column.
- :paramtype target_rolling_window_size:
- ~azure.mgmt.machinelearningservices.models.TargetRollingWindowSize
- :keyword time_column_name: The name of the time column. This parameter is required when
- forecasting to specify the datetime column in the input data used for building the time series
- and inferring its frequency.
- :paramtype time_column_name: str
- :keyword time_series_id_column_names: The names of columns used to group a timeseries. It can
- be used to create multiple series.
- If grain is not defined, the data set is assumed to be one time-series. This parameter is used
- with task type forecasting.
- :paramtype time_series_id_column_names: list[str]
- :keyword use_stl: Configure STL Decomposition of the time-series target column. Known values
- are: "None", "Season", and "SeasonTrend".
- :paramtype use_stl: str or ~azure.mgmt.machinelearningservices.models.UseStl
+ :keyword next_link: The link to the next page of DataVersionBase objects. If null, there are no
+ additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type DataVersionBase.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.DataVersionBase]
"""
super().__init__(**kwargs)
- self.country_or_region_for_holidays = country_or_region_for_holidays
- self.cv_step_size = cv_step_size
- self.feature_lags = feature_lags
- self.forecast_horizon = forecast_horizon
- self.frequency = frequency
- self.seasonality = seasonality
- self.short_series_handling_config = short_series_handling_config
- self.target_aggregate_function = target_aggregate_function
- self.target_lags = target_lags
- self.target_rolling_window_size = target_rolling_window_size
- self.time_column_name = time_column_name
- self.time_series_id_column_names = time_series_id_column_names
- self.use_stl = use_stl
+ self.next_link = next_link
+ self.value = value
-class ForecastingTrainingSettings(TrainingSettings):
- """Forecasting Training related configuration.
+class OnlineScaleSettings(_serialization.Model):
+ """Online deployment scaling configuration.
- :ivar enable_dnn_training: Enable recommendation of DNN models.
- :vartype enable_dnn_training: bool
- :ivar enable_model_explainability: Flag to turn on explainability on best model.
- :vartype enable_model_explainability: bool
- :ivar enable_onnx_compatible_models: Flag for enabling onnx compatible models.
- :vartype enable_onnx_compatible_models: bool
- :ivar enable_stack_ensemble: Enable stack ensemble run.
- :vartype enable_stack_ensemble: bool
- :ivar enable_vote_ensemble: Enable voting ensemble run.
- :vartype enable_vote_ensemble: bool
- :ivar ensemble_model_download_timeout: During VotingEnsemble and StackEnsemble model
- generation, multiple fitted models from the previous child runs are downloaded.
- Configure this parameter with a higher value than 300 secs, if more time is needed.
- :vartype ensemble_model_download_timeout: ~datetime.timedelta
- :ivar stack_ensemble_settings: Stack ensemble settings for stack ensemble run.
- :vartype stack_ensemble_settings:
- ~azure.mgmt.machinelearningservices.models.StackEnsembleSettings
- :ivar allowed_training_algorithms: Allowed models for forecasting task.
- :vartype allowed_training_algorithms: list[str or
- ~azure.mgmt.machinelearningservices.models.ForecastingModels]
- :ivar blocked_training_algorithms: Blocked models for forecasting task.
- :vartype blocked_training_algorithms: list[str or
- ~azure.mgmt.machinelearningservices.models.ForecastingModels]
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ DefaultScaleSettings, TargetUtilizationScaleSettings
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar scale_type: [Required] Type of deployment scaling algorithm. Required. Known values are:
+ "Default" and "TargetUtilization".
+ :vartype scale_type: str or ~azure.mgmt.machinelearningservices.models.ScaleType
"""
+ _validation = {
+ "scale_type": {"required": True},
+ }
+
_attribute_map = {
- "enable_dnn_training": {"key": "enableDnnTraining", "type": "bool"},
- "enable_model_explainability": {"key": "enableModelExplainability", "type": "bool"},
- "enable_onnx_compatible_models": {"key": "enableOnnxCompatibleModels", "type": "bool"},
- "enable_stack_ensemble": {"key": "enableStackEnsemble", "type": "bool"},
- "enable_vote_ensemble": {"key": "enableVoteEnsemble", "type": "bool"},
- "ensemble_model_download_timeout": {"key": "ensembleModelDownloadTimeout", "type": "duration"},
- "stack_ensemble_settings": {"key": "stackEnsembleSettings", "type": "StackEnsembleSettings"},
- "allowed_training_algorithms": {"key": "allowedTrainingAlgorithms", "type": "[str]"},
- "blocked_training_algorithms": {"key": "blockedTrainingAlgorithms", "type": "[str]"},
+ "scale_type": {"key": "scaleType", "type": "str"},
}
- def __init__(
- self,
- *,
- enable_dnn_training: bool = False,
- enable_model_explainability: bool = True,
- enable_onnx_compatible_models: bool = False,
- enable_stack_ensemble: bool = True,
- enable_vote_ensemble: bool = True,
- ensemble_model_download_timeout: datetime.timedelta = "PT5M",
- stack_ensemble_settings: Optional["_models.StackEnsembleSettings"] = None,
- allowed_training_algorithms: Optional[List[Union[str, "_models.ForecastingModels"]]] = None,
- blocked_training_algorithms: Optional[List[Union[str, "_models.ForecastingModels"]]] = None,
- **kwargs: Any
- ) -> None:
- """
- :keyword enable_dnn_training: Enable recommendation of DNN models.
- :paramtype enable_dnn_training: bool
- :keyword enable_model_explainability: Flag to turn on explainability on best model.
- :paramtype enable_model_explainability: bool
- :keyword enable_onnx_compatible_models: Flag for enabling onnx compatible models.
- :paramtype enable_onnx_compatible_models: bool
- :keyword enable_stack_ensemble: Enable stack ensemble run.
- :paramtype enable_stack_ensemble: bool
- :keyword enable_vote_ensemble: Enable voting ensemble run.
- :paramtype enable_vote_ensemble: bool
- :keyword ensemble_model_download_timeout: During VotingEnsemble and StackEnsemble model
- generation, multiple fitted models from the previous child runs are downloaded.
- Configure this parameter with a higher value than 300 secs, if more time is needed.
- :paramtype ensemble_model_download_timeout: ~datetime.timedelta
- :keyword stack_ensemble_settings: Stack ensemble settings for stack ensemble run.
- :paramtype stack_ensemble_settings:
- ~azure.mgmt.machinelearningservices.models.StackEnsembleSettings
- :keyword allowed_training_algorithms: Allowed models for forecasting task.
- :paramtype allowed_training_algorithms: list[str or
- ~azure.mgmt.machinelearningservices.models.ForecastingModels]
- :keyword blocked_training_algorithms: Blocked models for forecasting task.
- :paramtype blocked_training_algorithms: list[str or
- ~azure.mgmt.machinelearningservices.models.ForecastingModels]
- """
- super().__init__(
- enable_dnn_training=enable_dnn_training,
- enable_model_explainability=enable_model_explainability,
- enable_onnx_compatible_models=enable_onnx_compatible_models,
- enable_stack_ensemble=enable_stack_ensemble,
- enable_vote_ensemble=enable_vote_ensemble,
- ensemble_model_download_timeout=ensemble_model_download_timeout,
- stack_ensemble_settings=stack_ensemble_settings,
- **kwargs
- )
- self.allowed_training_algorithms = allowed_training_algorithms
- self.blocked_training_algorithms = blocked_training_algorithms
+ _subtype_map = {
+ "scale_type": {"Default": "DefaultScaleSettings", "TargetUtilization": "TargetUtilizationScaleSettings"}
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.scale_type: Optional[str] = None
-class FQDNEndpoint(_serialization.Model):
- """FQDNEndpoint.
+class DefaultScaleSettings(OnlineScaleSettings):
+ """DefaultScaleSettings.
- :ivar domain_name:
- :vartype domain_name: str
- :ivar endpoint_details:
- :vartype endpoint_details: list[~azure.mgmt.machinelearningservices.models.FQDNEndpointDetail]
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar scale_type: [Required] Type of deployment scaling algorithm. Required. Known values are:
+ "Default" and "TargetUtilization".
+ :vartype scale_type: str or ~azure.mgmt.machinelearningservices.models.ScaleType
"""
+ _validation = {
+ "scale_type": {"required": True},
+ }
+
_attribute_map = {
- "domain_name": {"key": "domainName", "type": "str"},
- "endpoint_details": {"key": "endpointDetails", "type": "[FQDNEndpointDetail]"},
+ "scale_type": {"key": "scaleType", "type": "str"},
}
- def __init__(
- self,
- *,
- domain_name: Optional[str] = None,
- endpoint_details: Optional[List["_models.FQDNEndpointDetail"]] = None,
- **kwargs: Any
- ) -> None:
- """
- :keyword domain_name:
- :paramtype domain_name: str
- :keyword endpoint_details:
- :paramtype endpoint_details:
- list[~azure.mgmt.machinelearningservices.models.FQDNEndpointDetail]
- """
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
super().__init__(**kwargs)
- self.domain_name = domain_name
- self.endpoint_details = endpoint_details
+ self.scale_type: str = "Default"
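A short sketch of the discriminator behaviour defined by the two classes above: the base class carries the scale_type subtype map and the subclass constructor pins the value.

from azure.mgmt.machinelearningservices import models

scale = models.DefaultScaleSettings()
assert scale.scale_type == "Default"
assert isinstance(scale, models.OnlineScaleSettings)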
-class FQDNEndpointDetail(_serialization.Model):
- """FQDNEndpointDetail.
+class DeploymentLogs(_serialization.Model):
+ """DeploymentLogs.
- :ivar port:
- :vartype port: int
+ :ivar content: The retrieved online deployment logs.
+ :vartype content: str
"""
_attribute_map = {
- "port": {"key": "port", "type": "int"},
+ "content": {"key": "content", "type": "str"},
}
- def __init__(self, *, port: Optional[int] = None, **kwargs: Any) -> None:
+ def __init__(self, *, content: Optional[str] = None, **kwargs: Any) -> None:
"""
- :keyword port:
- :paramtype port: int
+ :keyword content: The retrieved online deployment logs.
+ :paramtype content: str
"""
super().__init__(**kwargs)
- self.port = port
+ self.content = content
-class FQDNEndpoints(_serialization.Model):
- """FQDNEndpoints.
+class DeploymentLogsRequest(_serialization.Model):
+ """DeploymentLogsRequest.
- :ivar properties:
- :vartype properties: ~azure.mgmt.machinelearningservices.models.FQDNEndpointsProperties
+ :ivar container_type: The type of container to retrieve logs from. Known values are:
+ "StorageInitializer", "InferenceServer", and "ModelDataCollector".
+ :vartype container_type: str or ~azure.mgmt.machinelearningservices.models.ContainerType
+ :ivar tail: The maximum number of lines to tail.
+ :vartype tail: int
"""
_attribute_map = {
- "properties": {"key": "properties", "type": "FQDNEndpointsProperties"},
+ "container_type": {"key": "containerType", "type": "str"},
+ "tail": {"key": "tail", "type": "int"},
}
- def __init__(self, *, properties: Optional["_models.FQDNEndpointsProperties"] = None, **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ container_type: Optional[Union[str, "_models.ContainerType"]] = None,
+ tail: Optional[int] = None,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword properties:
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.FQDNEndpointsProperties
+ :keyword container_type: The type of container to retrieve logs from. Known values are:
+ "StorageInitializer", "InferenceServer", and "ModelDataCollector".
+ :paramtype container_type: str or ~azure.mgmt.machinelearningservices.models.ContainerType
+ :keyword tail: The maximum number of lines to tail.
+ :paramtype tail: int
"""
super().__init__(**kwargs)
- self.properties = properties
+ self.container_type = container_type
+ self.tail = tail
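A minimal request sketch for the model above; the container type comes from the known values listed in the docstring, the tail value is illustrative, and the body would typically be passed to the online-deployment get-logs operation (not shown in this diff).

from azure.mgmt.machinelearningservices import models

# Ask for the last 100 log lines from the inference server container.
logs_request = models.DeploymentLogsRequest(
    container_type="InferenceServer",
    tail=100,
)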
-class FQDNEndpointsProperties(_serialization.Model):
- """FQDNEndpointsProperties.
+class ResourceConfiguration(_serialization.Model):
+ """ResourceConfiguration.
- :ivar category:
- :vartype category: str
- :ivar endpoints:
- :vartype endpoints: list[~azure.mgmt.machinelearningservices.models.FQDNEndpoint]
+ :ivar instance_count: Optional number of instances or nodes used by the compute target.
+ :vartype instance_count: int
+ :ivar instance_type: Optional type of VM used as supported by the compute target.
+ :vartype instance_type: str
+ :ivar locations: Locations where the job can run.
+ :vartype locations: list[str]
+ :ivar max_instance_count: Optional max allowed number of instances or nodes to be used by the
+ compute target.
+ For use with elastic training, currently supported by PyTorch distribution type only.
+ :vartype max_instance_count: int
+ :ivar properties: Additional properties bag.
+ :vartype properties: dict[str, JSON]
"""
_attribute_map = {
- "category": {"key": "category", "type": "str"},
- "endpoints": {"key": "endpoints", "type": "[FQDNEndpoint]"},
+ "instance_count": {"key": "instanceCount", "type": "int"},
+ "instance_type": {"key": "instanceType", "type": "str"},
+ "locations": {"key": "locations", "type": "[str]"},
+ "max_instance_count": {"key": "maxInstanceCount", "type": "int"},
+ "properties": {"key": "properties", "type": "{object}"},
}
def __init__(
- self, *, category: Optional[str] = None, endpoints: Optional[List["_models.FQDNEndpoint"]] = None, **kwargs: Any
+ self,
+ *,
+ instance_count: int = 1,
+ instance_type: Optional[str] = None,
+ locations: Optional[List[str]] = None,
+ max_instance_count: Optional[int] = None,
+ properties: Optional[Dict[str, JSON]] = None,
+ **kwargs: Any
) -> None:
"""
- :keyword category:
- :paramtype category: str
- :keyword endpoints:
- :paramtype endpoints: list[~azure.mgmt.machinelearningservices.models.FQDNEndpoint]
+ :keyword instance_count: Optional number of instances or nodes used by the compute target.
+ :paramtype instance_count: int
+ :keyword instance_type: Optional type of VM used as supported by the compute target.
+ :paramtype instance_type: str
+ :keyword locations: Locations where the job can run.
+ :paramtype locations: list[str]
+ :keyword max_instance_count: Optional max allowed number of instances or nodes to be used by
+ the compute target.
+ For use with elastic training, currently supported by PyTorch distribution type only.
+ :paramtype max_instance_count: int
+ :keyword properties: Additional properties bag.
+ :paramtype properties: dict[str, JSON]
"""
super().__init__(**kwargs)
- self.category = category
- self.endpoints = endpoints
-
+ self.instance_count = instance_count
+ self.instance_type = instance_type
+ self.locations = locations
+ self.max_instance_count = max_instance_count
+ self.properties = properties
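A small sketch for the configuration above, using only the parameters shown in this diff; the VM size and the property-bag contents are illustrative.

from azure.mgmt.machinelearningservices import models

resources = models.ResourceConfiguration(
    instance_count=2,
    instance_type="Standard_DS3_v2",  # illustrative VM size
    properties={"example": {"enabled": True}},  # free-form JSON property bag
)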
-class GridSamplingAlgorithm(SamplingAlgorithm):
- """Defines a Sampling Algorithm that exhaustively generates every value combination in the space.
- All required parameters must be populated in order to send to Azure.
+class DeploymentResourceConfiguration(ResourceConfiguration):
+ """DeploymentResourceConfiguration.
- :ivar sampling_algorithm_type: [Required] The algorithm used for generating hyperparameter
- values, along with configuration properties. Required. Known values are: "Grid", "Random", and
- "Bayesian".
- :vartype sampling_algorithm_type: str or
- ~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
+ :ivar instance_count: Optional number of instances or nodes used by the compute target.
+ :vartype instance_count: int
+ :ivar instance_type: Optional type of VM used as supported by the compute target.
+ :vartype instance_type: str
+ :ivar locations: Locations where the job can run.
+ :vartype locations: list[str]
+ :ivar max_instance_count: Optional max allowed number of instances or nodes to be used by the
+ compute target.
+ For use with elastic training, currently supported by PyTorch distribution type only.
+ :vartype max_instance_count: int
+ :ivar properties: Additional properties bag.
+ :vartype properties: dict[str, JSON]
"""
- _validation = {
- "sampling_algorithm_type": {"required": True},
- }
-
_attribute_map = {
- "sampling_algorithm_type": {"key": "samplingAlgorithmType", "type": "str"},
- }
-
- def __init__(self, **kwargs: Any) -> None:
- """ """
- super().__init__(**kwargs)
- self.sampling_algorithm_type: str = "Grid"
-
-
-class HDInsightSchema(_serialization.Model):
- """HDInsightSchema.
-
- :ivar properties: HDInsight compute properties.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.HDInsightProperties
- """
-
- _attribute_map = {
- "properties": {"key": "properties", "type": "HDInsightProperties"},
- }
-
- def __init__(self, *, properties: Optional["_models.HDInsightProperties"] = None, **kwargs: Any) -> None:
- """
- :keyword properties: HDInsight compute properties.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.HDInsightProperties
- """
- super().__init__(**kwargs)
- self.properties = properties
-
-
-class HDInsight(Compute, HDInsightSchema): # pylint: disable=too-many-instance-attributes
- """A HDInsight compute.
-
- Variables are only populated by the server, and will be ignored when sending a request.
-
- All required parameters must be populated in order to send to Azure.
-
- :ivar properties: HDInsight compute properties.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.HDInsightProperties
- :ivar compute_type: The type of compute. Required. Known values are: "AKS", "Kubernetes",
- "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight", "Databricks",
- "DataLakeAnalytics", and "SynapseSpark".
- :vartype compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
- :ivar compute_location: Location for the underlying compute.
- :vartype compute_location: str
- :ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
- Updating, Provisioning, Succeeded, and Failed. Known values are: "Unknown", "Updating",
- "Creating", "Deleting", "Succeeded", "Failed", and "Canceled".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.ProvisioningState
- :ivar description: The description of the Machine Learning compute.
- :vartype description: str
- :ivar created_on: The time at which the compute was created.
- :vartype created_on: ~datetime.datetime
- :ivar modified_on: The time at which the compute was last modified.
- :vartype modified_on: ~datetime.datetime
- :ivar resource_id: ARM resource id of the underlying compute.
- :vartype resource_id: str
- :ivar provisioning_errors: Errors during provisioning.
- :vartype provisioning_errors: list[~azure.mgmt.machinelearningservices.models.ErrorResponse]
- :ivar is_attached_compute: Indicating whether the compute was provisioned by user and brought
- from outside if true, or machine learning service provisioned it if false.
- :vartype is_attached_compute: bool
- :ivar disable_local_auth: Opt-out of local authentication and ensure customers can use only MSI
- and AAD exclusively for authentication.
- :vartype disable_local_auth: bool
- """
-
- _validation = {
- "compute_type": {"required": True},
- "provisioning_state": {"readonly": True},
- "created_on": {"readonly": True},
- "modified_on": {"readonly": True},
- "provisioning_errors": {"readonly": True},
- "is_attached_compute": {"readonly": True},
- }
-
- _attribute_map = {
- "properties": {"key": "properties", "type": "HDInsightProperties"},
- "compute_type": {"key": "computeType", "type": "str"},
- "compute_location": {"key": "computeLocation", "type": "str"},
- "provisioning_state": {"key": "provisioningState", "type": "str"},
- "description": {"key": "description", "type": "str"},
- "created_on": {"key": "createdOn", "type": "iso-8601"},
- "modified_on": {"key": "modifiedOn", "type": "iso-8601"},
- "resource_id": {"key": "resourceId", "type": "str"},
- "provisioning_errors": {"key": "provisioningErrors", "type": "[ErrorResponse]"},
- "is_attached_compute": {"key": "isAttachedCompute", "type": "bool"},
- "disable_local_auth": {"key": "disableLocalAuth", "type": "bool"},
+ "instance_count": {"key": "instanceCount", "type": "int"},
+ "instance_type": {"key": "instanceType", "type": "str"},
+ "locations": {"key": "locations", "type": "[str]"},
+ "max_instance_count": {"key": "maxInstanceCount", "type": "int"},
+ "properties": {"key": "properties", "type": "{object}"},
}
def __init__(
self,
*,
- properties: Optional["_models.HDInsightProperties"] = None,
- compute_location: Optional[str] = None,
- description: Optional[str] = None,
- resource_id: Optional[str] = None,
- disable_local_auth: Optional[bool] = None,
+ instance_count: int = 1,
+ instance_type: Optional[str] = None,
+ locations: Optional[List[str]] = None,
+ max_instance_count: Optional[int] = None,
+ properties: Optional[Dict[str, JSON]] = None,
**kwargs: Any
) -> None:
"""
- :keyword properties: HDInsight compute properties.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.HDInsightProperties
- :keyword compute_location: Location for the underlying compute.
- :paramtype compute_location: str
- :keyword description: The description of the Machine Learning compute.
- :paramtype description: str
- :keyword resource_id: ARM resource id of the underlying compute.
- :paramtype resource_id: str
- :keyword disable_local_auth: Opt-out of local authentication and ensure customers can use only
- MSI and AAD exclusively for authentication.
- :paramtype disable_local_auth: bool
+ :keyword instance_count: Optional number of instances or nodes used by the compute target.
+ :paramtype instance_count: int
+ :keyword instance_type: Optional type of VM used as supported by the compute target.
+ :paramtype instance_type: str
+ :keyword locations: Locations where the job can run.
+ :paramtype locations: list[str]
+ :keyword max_instance_count: Optional max allowed number of instances or nodes to be used by
+ the compute target.
+ For use with elastic training, currently supported by PyTorch distribution type only.
+ :paramtype max_instance_count: int
+ :keyword properties: Additional properties bag.
+ :paramtype properties: dict[str, JSON]
"""
super().__init__(
- compute_location=compute_location,
- description=description,
- resource_id=resource_id,
- disable_local_auth=disable_local_auth,
+ instance_count=instance_count,
+ instance_type=instance_type,
+ locations=locations,
+ max_instance_count=max_instance_count,
properties=properties,
**kwargs
)
- self.properties = properties
- self.compute_type: str = "HDInsight"
- self.compute_location = compute_location
- self.provisioning_state = None
- self.description = description
- self.created_on = None
- self.modified_on = None
- self.resource_id = resource_id
- self.provisioning_errors = None
- self.is_attached_compute = None
- self.disable_local_auth = disable_local_auth
-class HDInsightProperties(_serialization.Model):
- """HDInsight compute properties.
+class DiagnoseRequestProperties(_serialization.Model):
+ """DiagnoseRequestProperties.
- :ivar ssh_port: Port open for ssh connections on the master node of the cluster.
- :vartype ssh_port: int
- :ivar address: Public IP address of the master node of the cluster.
- :vartype address: str
- :ivar administrator_account: Admin credentials for master node of the cluster.
- :vartype administrator_account:
- ~azure.mgmt.machinelearningservices.models.VirtualMachineSshCredentials
+ :ivar application_insights: Setting for diagnosing dependent application insights.
+ :vartype application_insights: dict[str, any]
+ :ivar container_registry: Setting for diagnosing dependent container registry.
+ :vartype container_registry: dict[str, any]
+ :ivar dns_resolution: Setting for diagnosing dns resolution.
+ :vartype dns_resolution: dict[str, any]
+ :ivar key_vault: Setting for diagnosing dependent key vault.
+ :vartype key_vault: dict[str, any]
+ :ivar nsg: Setting for diagnosing network security group.
+ :vartype nsg: dict[str, any]
+ :ivar others: Setting for diagnosing unclassified category of problems.
+ :vartype others: dict[str, any]
+ :ivar required_resource_providers: Setting for diagnosing the presence of required resource
+ providers in the workspace.
+ :vartype required_resource_providers: dict[str, any]
+ :ivar resource_lock: Setting for diagnosing resource lock.
+ :vartype resource_lock: dict[str, any]
+ :ivar storage_account: Setting for diagnosing dependent storage account.
+ :vartype storage_account: dict[str, any]
+ :ivar udr: Setting for diagnosing user defined routing.
+ :vartype udr: dict[str, any]
"""
_attribute_map = {
- "ssh_port": {"key": "sshPort", "type": "int"},
- "address": {"key": "address", "type": "str"},
- "administrator_account": {"key": "administratorAccount", "type": "VirtualMachineSshCredentials"},
+ "application_insights": {"key": "applicationInsights", "type": "{object}"},
+ "container_registry": {"key": "containerRegistry", "type": "{object}"},
+ "dns_resolution": {"key": "dnsResolution", "type": "{object}"},
+ "key_vault": {"key": "keyVault", "type": "{object}"},
+ "nsg": {"key": "nsg", "type": "{object}"},
+ "others": {"key": "others", "type": "{object}"},
+ "required_resource_providers": {"key": "requiredResourceProviders", "type": "{object}"},
+ "resource_lock": {"key": "resourceLock", "type": "{object}"},
+ "storage_account": {"key": "storageAccount", "type": "{object}"},
+ "udr": {"key": "udr", "type": "{object}"},
}
def __init__(
self,
*,
- ssh_port: Optional[int] = None,
- address: Optional[str] = None,
- administrator_account: Optional["_models.VirtualMachineSshCredentials"] = None,
+ application_insights: Optional[Dict[str, Any]] = None,
+ container_registry: Optional[Dict[str, Any]] = None,
+ dns_resolution: Optional[Dict[str, Any]] = None,
+ key_vault: Optional[Dict[str, Any]] = None,
+ nsg: Optional[Dict[str, Any]] = None,
+ others: Optional[Dict[str, Any]] = None,
+ required_resource_providers: Optional[Dict[str, Any]] = None,
+ resource_lock: Optional[Dict[str, Any]] = None,
+ storage_account: Optional[Dict[str, Any]] = None,
+ udr: Optional[Dict[str, Any]] = None,
**kwargs: Any
) -> None:
"""
- :keyword ssh_port: Port open for ssh connections on the master node of the cluster.
- :paramtype ssh_port: int
- :keyword address: Public IP address of the master node of the cluster.
- :paramtype address: str
- :keyword administrator_account: Admin credentials for master node of the cluster.
- :paramtype administrator_account:
- ~azure.mgmt.machinelearningservices.models.VirtualMachineSshCredentials
+ :keyword application_insights: Setting for diagnosing dependent application insights.
+ :paramtype application_insights: dict[str, any]
+ :keyword container_registry: Setting for diagnosing dependent container registry.
+ :paramtype container_registry: dict[str, any]
+ :keyword dns_resolution: Setting for diagnosing dns resolution.
+ :paramtype dns_resolution: dict[str, any]
+ :keyword key_vault: Setting for diagnosing dependent key vault.
+ :paramtype key_vault: dict[str, any]
+ :keyword nsg: Setting for diagnosing network security group.
+ :paramtype nsg: dict[str, any]
+ :keyword others: Setting for diagnosing unclassified category of problems.
+ :paramtype others: dict[str, any]
+ :keyword required_resource_providers: Setting for diagnosing the presence of required resource
+ providers in the workspace.
+ :paramtype required_resource_providers: dict[str, any]
+ :keyword resource_lock: Setting for diagnosing resource lock.
+ :paramtype resource_lock: dict[str, any]
+ :keyword storage_account: Setting for diagnosing dependent storage account.
+ :paramtype storage_account: dict[str, any]
+ :keyword udr: Setting for diagnosing user defined routing.
+ :paramtype udr: dict[str, any]
"""
super().__init__(**kwargs)
- self.ssh_port = ssh_port
- self.address = address
- self.administrator_account = administrator_account
-
+ self.application_insights = application_insights
+ self.container_registry = container_registry
+ self.dns_resolution = dns_resolution
+ self.key_vault = key_vault
+ self.nsg = nsg
+ self.others = others
+ self.required_resource_providers = required_resource_providers
+ self.resource_lock = resource_lock
+ self.storage_account = storage_account
+ self.udr = udr
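# Illustrative sketch (not part of the generated code): constructing this request model.
# Each setting is a free-form dict; the empty dicts below simply opt the corresponding
# category into the diagnosis. Any keys placed inside them would be assumptions, not a
# documented schema. Assumes the package version produced by this change is installed.
from azure.mgmt.machinelearningservices.models import DiagnoseRequestProperties

diagnose_settings = DiagnoseRequestProperties(
    dns_resolution={},
    nsg={},
    storage_account={},
)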
-class IdAssetReference(AssetReferenceBase):
- """Reference to an asset via its ARM resource ID.
- All required parameters must be populated in order to send to Azure.
+class DiagnoseResponseResult(_serialization.Model):
+ """DiagnoseResponseResult.
- :ivar reference_type: [Required] Specifies the type of asset reference. Required. Known values
- are: "Id", "DataPath", and "OutputPath".
- :vartype reference_type: str or ~azure.mgmt.machinelearningservices.models.ReferenceType
- :ivar asset_id: [Required] ARM resource ID of the asset. Required.
- :vartype asset_id: str
+ :ivar value:
+ :vartype value: ~azure.mgmt.machinelearningservices.models.DiagnoseResponseResultValue
"""
- _validation = {
- "reference_type": {"required": True},
- "asset_id": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
- }
-
_attribute_map = {
- "reference_type": {"key": "referenceType", "type": "str"},
- "asset_id": {"key": "assetId", "type": "str"},
+ "value": {"key": "value", "type": "DiagnoseResponseResultValue"},
}
- def __init__(self, *, asset_id: str, **kwargs: Any) -> None:
+ def __init__(self, *, value: Optional["_models.DiagnoseResponseResultValue"] = None, **kwargs: Any) -> None:
"""
- :keyword asset_id: [Required] ARM resource ID of the asset. Required.
- :paramtype asset_id: str
+ :keyword value:
+ :paramtype value: ~azure.mgmt.machinelearningservices.models.DiagnoseResponseResultValue
"""
super().__init__(**kwargs)
- self.reference_type: str = "Id"
- self.asset_id = asset_id
+ self.value = value
-class IdentityForCmk(_serialization.Model):
- """Identity that will be used to access key vault for encryption at rest.
+class DiagnoseResponseResultValue(_serialization.Model):
+ """DiagnoseResponseResultValue.
- :ivar user_assigned_identity: The ArmId of the user assigned identity that will be used to
- access the customer managed key vault.
- :vartype user_assigned_identity: str
+ :ivar user_defined_route_results:
+ :vartype user_defined_route_results:
+ list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
+ :ivar network_security_rule_results:
+ :vartype network_security_rule_results:
+ list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
+ :ivar resource_lock_results:
+ :vartype resource_lock_results: list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
+ :ivar dns_resolution_results:
+ :vartype dns_resolution_results:
+ list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
+ :ivar storage_account_results:
+ :vartype storage_account_results:
+ list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
+ :ivar key_vault_results:
+ :vartype key_vault_results: list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
+ :ivar container_registry_results:
+ :vartype container_registry_results:
+ list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
+ :ivar application_insights_results:
+ :vartype application_insights_results:
+ list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
+ :ivar other_results:
+ :vartype other_results: list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
"""
_attribute_map = {
- "user_assigned_identity": {"key": "userAssignedIdentity", "type": "str"},
+ "user_defined_route_results": {"key": "userDefinedRouteResults", "type": "[DiagnoseResult]"},
+ "network_security_rule_results": {"key": "networkSecurityRuleResults", "type": "[DiagnoseResult]"},
+ "resource_lock_results": {"key": "resourceLockResults", "type": "[DiagnoseResult]"},
+ "dns_resolution_results": {"key": "dnsResolutionResults", "type": "[DiagnoseResult]"},
+ "storage_account_results": {"key": "storageAccountResults", "type": "[DiagnoseResult]"},
+ "key_vault_results": {"key": "keyVaultResults", "type": "[DiagnoseResult]"},
+ "container_registry_results": {"key": "containerRegistryResults", "type": "[DiagnoseResult]"},
+ "application_insights_results": {"key": "applicationInsightsResults", "type": "[DiagnoseResult]"},
+ "other_results": {"key": "otherResults", "type": "[DiagnoseResult]"},
}
- def __init__(self, *, user_assigned_identity: Optional[str] = None, **kwargs: Any) -> None:
- """
- :keyword user_assigned_identity: The ArmId of the user assigned identity that will be used to
- access the customer managed key vault.
- :paramtype user_assigned_identity: str
- """
- super().__init__(**kwargs)
- self.user_assigned_identity = user_assigned_identity
-
-
-class IdleShutdownSetting(_serialization.Model):
- """Stops compute instance after user defined period of inactivity.
+ def __init__(
+ self,
+ *,
+ user_defined_route_results: Optional[List["_models.DiagnoseResult"]] = None,
+ network_security_rule_results: Optional[List["_models.DiagnoseResult"]] = None,
+ resource_lock_results: Optional[List["_models.DiagnoseResult"]] = None,
+ dns_resolution_results: Optional[List["_models.DiagnoseResult"]] = None,
+ storage_account_results: Optional[List["_models.DiagnoseResult"]] = None,
+ key_vault_results: Optional[List["_models.DiagnoseResult"]] = None,
+ container_registry_results: Optional[List["_models.DiagnoseResult"]] = None,
+ application_insights_results: Optional[List["_models.DiagnoseResult"]] = None,
+ other_results: Optional[List["_models.DiagnoseResult"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword user_defined_route_results:
+ :paramtype user_defined_route_results:
+ list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
+ :keyword network_security_rule_results:
+ :paramtype network_security_rule_results:
+ list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
+ :keyword resource_lock_results:
+ :paramtype resource_lock_results:
+ list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
+ :keyword dns_resolution_results:
+ :paramtype dns_resolution_results:
+ list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
+ :keyword storage_account_results:
+ :paramtype storage_account_results:
+ list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
+ :keyword key_vault_results:
+ :paramtype key_vault_results: list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
+ :keyword container_registry_results:
+ :paramtype container_registry_results:
+ list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
+ :keyword application_insights_results:
+ :paramtype application_insights_results:
+ list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
+ :keyword other_results:
+ :paramtype other_results: list[~azure.mgmt.machinelearningservices.models.DiagnoseResult]
+ """
+ super().__init__(**kwargs)
+ self.user_defined_route_results = user_defined_route_results
+ self.network_security_rule_results = network_security_rule_results
+ self.resource_lock_results = resource_lock_results
+ self.dns_resolution_results = dns_resolution_results
+ self.storage_account_results = storage_account_results
+ self.key_vault_results = key_vault_results
+ self.container_registry_results = container_registry_results
+ self.application_insights_results = application_insights_results
+ self.other_results = other_results
- :ivar idle_time_before_shutdown: Time is defined in ISO8601 format. Minimum is 15 min, maximum
- is 3 days.
- :vartype idle_time_before_shutdown: str
+
+class DiagnoseResult(_serialization.Model):
+ """Result of Diagnose.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar code: Code for workspace setup error.
+ :vartype code: str
+ :ivar level: Level of workspace setup error. Known values are: "Warning", "Error", and
+ "Information".
+ :vartype level: str or ~azure.mgmt.machinelearningservices.models.DiagnoseResultLevel
+ :ivar message: Message of workspace setup error.
+ :vartype message: str
"""
+ _validation = {
+ "code": {"readonly": True},
+ "level": {"readonly": True},
+ "message": {"readonly": True},
+ }
+
_attribute_map = {
- "idle_time_before_shutdown": {"key": "idleTimeBeforeShutdown", "type": "str"},
+ "code": {"key": "code", "type": "str"},
+ "level": {"key": "level", "type": "str"},
+ "message": {"key": "message", "type": "str"},
}
- def __init__(self, *, idle_time_before_shutdown: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.code = None
+ self.level = None
+ self.message = None
+
+
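# Illustrative sketch (not part of the generated code): reading diagnose findings.
# `diagnose_result` is assumed to be a DiagnoseResponseResult returned by the service;
# code, level and message on each DiagnoseResult are read-only and populated server-side.
def print_dns_findings(diagnose_result):
    value = diagnose_result.value
    if value is None or not value.dns_resolution_results:
        return
    for finding in value.dns_resolution_results:
        print(finding.level, finding.code, finding.message)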
+class DiagnoseWorkspaceParameters(_serialization.Model):
+ """Parameters to diagnose a workspace.
+
+ :ivar value:
+ :vartype value: ~azure.mgmt.machinelearningservices.models.DiagnoseRequestProperties
+ """
+
+ _attribute_map = {
+ "value": {"key": "value", "type": "DiagnoseRequestProperties"},
+ }
+
+ def __init__(self, *, value: Optional["_models.DiagnoseRequestProperties"] = None, **kwargs: Any) -> None:
"""
- :keyword idle_time_before_shutdown: Time is defined in ISO8601 format. Minimum is 15 min,
- maximum is 3 days.
- :paramtype idle_time_before_shutdown: str
+ :keyword value:
+ :paramtype value: ~azure.mgmt.machinelearningservices.models.DiagnoseRequestProperties
"""
super().__init__(**kwargs)
- self.idle_time_before_shutdown = idle_time_before_shutdown
+ self.value = value
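# Illustrative sketch (not part of the generated code): DiagnoseWorkspaceParameters wraps the
# request properties and is the body of the workspace diagnose operation. The operation name
# in the comment assumes the workspaces operation group exposes begin_diagnose, as in recent
# versions of this client.
from azure.mgmt.machinelearningservices.models import (
    DiagnoseRequestProperties,
    DiagnoseWorkspaceParameters,
)

parameters = DiagnoseWorkspaceParameters(
    value=DiagnoseRequestProperties(key_vault={}, storage_account={})
)
# poller = client.workspaces.begin_diagnose(resource_group_name, workspace_name, parameters)
# result = poller.result()  # a DiagnoseResponseResult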
-class Image(_serialization.Model):
- """Describes the Image Specifications.
+class DistributionConfiguration(_serialization.Model):
+ """Base definition for job distribution configuration.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ Mpi, PyTorch, Ray, TensorFlow
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar distribution_type: [Required] Specifies the type of distribution framework. Required.
+ Known values are: "PyTorch", "TensorFlow", "Mpi", and "Ray".
+ :vartype distribution_type: str or ~azure.mgmt.machinelearningservices.models.DistributionType
+ """
+
+ _validation = {
+ "distribution_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "distribution_type": {"key": "distributionType", "type": "str"},
+ }
+
+ _subtype_map = {"distribution_type": {"Mpi": "Mpi", "PyTorch": "PyTorch", "Ray": "Ray", "TensorFlow": "TensorFlow"}}
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.distribution_type: Optional[str] = None
+
+
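# Illustrative sketch (not part of the generated code): DistributionConfiguration is a
# polymorphic base, so callers instantiate one of the sub-classes listed in _subtype_map and
# the sub-class constructor fills in the distribution_type discriminator. PyTorch's other
# keyword arguments are omitted here.
from azure.mgmt.machinelearningservices.models import PyTorch

distribution = PyTorch()
assert distribution.distribution_type == "PyTorch"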
+class Docker(_serialization.Model):
+ """Docker.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, any]
- :ivar type: Type of the image. Possible values are: docker - For docker images. azureml - For
- AzureML images. Known values are: "docker" and "azureml".
- :vartype type: str or ~azure.mgmt.machinelearningservices.models.ImageType
- :ivar reference: Image reference.
- :vartype reference: str
+ :ivar privileged: Indicates whether the container shall run in privileged or non-privileged mode.
+ :vartype privileged: bool
"""
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
- "type": {"key": "type", "type": "str"},
- "reference": {"key": "reference", "type": "str"},
+ "privileged": {"key": "privileged", "type": "bool"},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, Any]] = None,
- type: Union[str, "_models.ImageType"] = "docker",
- reference: Optional[str] = None,
+ privileged: Optional[bool] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, any]
- :keyword type: Type of the image. Possible values are: docker - For docker images. azureml -
- For AzureML images. Known values are: "docker" and "azureml".
- :paramtype type: str or ~azure.mgmt.machinelearningservices.models.ImageType
- :keyword reference: Image reference.
- :paramtype reference: str
+ :keyword privileged: Indicates whether the container shall run in privileged or non-privileged mode.
+ :paramtype privileged: bool
"""
super().__init__(**kwargs)
self.additional_properties = additional_properties
- self.type = type
- self.reference = reference
+ self.privileged = privileged
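# Illustrative sketch (not part of the generated code): additional_properties is mapped to
# the empty key "", so unmatched fields are flattened into the top-level JSON object when the
# model is serialized. The extra key below is a placeholder, not a documented field.
from azure.mgmt.machinelearningservices.models import Docker

docker = Docker(privileged=True, additional_properties={"customSetting": "value"})
body = docker.serialize()  # roughly {"privileged": True, "customSetting": "value"}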
-class ImageVertical(_serialization.Model):
- """Abstract class for AutoML tasks that train image (computer vision) models -
- such as Image Classification / Image Classification Multilabel / Image Object Detection / Image
- Instance Segmentation.
+class EncryptionKeyVaultUpdateProperties(_serialization.Model):
+ """EncryptionKeyVaultUpdateProperties.
All required parameters must be populated in order to send to Azure.
- :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
- :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
- :ivar validation_data: Validation data inputs.
- :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :vartype validation_data_size: float
+ :ivar key_identifier: Required.
+ :vartype key_identifier: str
"""
_validation = {
- "limit_settings": {"required": True},
+ "key_identifier": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
}
_attribute_map = {
- "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
- "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
- "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
- "validation_data_size": {"key": "validationDataSize", "type": "float"},
+ "key_identifier": {"key": "keyIdentifier", "type": "str"},
}
- def __init__(
- self,
- *,
- limit_settings: "_models.ImageLimitSettings",
- sweep_settings: Optional["_models.ImageSweepSettings"] = None,
- validation_data: Optional["_models.MLTableJobInput"] = None,
- validation_data_size: Optional[float] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, key_identifier: str, **kwargs: Any) -> None:
"""
- :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
- :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
- :keyword validation_data: Validation data inputs.
- :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :paramtype validation_data_size: float
+ :keyword key_identifier: Required.
+ :paramtype key_identifier: str
"""
super().__init__(**kwargs)
- self.limit_settings = limit_settings
- self.sweep_settings = sweep_settings
- self.validation_data = validation_data
- self.validation_data_size = validation_data_size
+ self.key_identifier = key_identifier
-class ImageClassificationBase(ImageVertical):
- """ImageClassificationBase.
+class EncryptionProperty(_serialization.Model):
+ """EncryptionProperty.
All required parameters must be populated in order to send to Azure.
- :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
- :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
- :ivar validation_data: Validation data inputs.
- :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :vartype validation_data_size: float
- :ivar model_settings: Settings used for training the model.
- :vartype model_settings:
- ~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
- :ivar search_space: Search space for sampling different combinations of models and their
- hyperparameters.
- :vartype search_space:
- list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
+ :ivar cosmos_db_resource_id: The BYOK Cosmos DB account that the customer brings to store the
+ customer's data with encryption.
+ :vartype cosmos_db_resource_id: str
+ :ivar identity: Identity to be used with the keyVault.
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.IdentityForCmk
+ :ivar key_vault_properties: KeyVault details to do the encryption. Required.
+ :vartype key_vault_properties: ~azure.mgmt.machinelearningservices.models.KeyVaultProperties
+ :ivar search_account_resource_id: The BYOK search account that the customer brings to store the
+ customer's data with encryption.
+ :vartype search_account_resource_id: str
+ :ivar status: Indicates whether or not the encryption is enabled for the workspace. Required.
+ Known values are: "Enabled" and "Disabled".
+ :vartype status: str or ~azure.mgmt.machinelearningservices.models.EncryptionStatus
+ :ivar storage_account_resource_id: The BYOK storage account that the customer brings to store
+ the customer's data with encryption.
+ :vartype storage_account_resource_id: str
"""
_validation = {
- "limit_settings": {"required": True},
+ "key_vault_properties": {"required": True},
+ "status": {"required": True},
}
_attribute_map = {
- "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
- "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
- "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
- "validation_data_size": {"key": "validationDataSize", "type": "float"},
- "model_settings": {"key": "modelSettings", "type": "ImageModelSettingsClassification"},
- "search_space": {"key": "searchSpace", "type": "[ImageModelDistributionSettingsClassification]"},
+ "cosmos_db_resource_id": {"key": "cosmosDbResourceId", "type": "str"},
+ "identity": {"key": "identity", "type": "IdentityForCmk"},
+ "key_vault_properties": {"key": "keyVaultProperties", "type": "KeyVaultProperties"},
+ "search_account_resource_id": {"key": "searchAccountResourceId", "type": "str"},
+ "status": {"key": "status", "type": "str"},
+ "storage_account_resource_id": {"key": "storageAccountResourceId", "type": "str"},
}
def __init__(
self,
*,
- limit_settings: "_models.ImageLimitSettings",
- sweep_settings: Optional["_models.ImageSweepSettings"] = None,
- validation_data: Optional["_models.MLTableJobInput"] = None,
- validation_data_size: Optional[float] = None,
- model_settings: Optional["_models.ImageModelSettingsClassification"] = None,
- search_space: Optional[List["_models.ImageModelDistributionSettingsClassification"]] = None,
+ key_vault_properties: "_models.KeyVaultProperties",
+ status: Union[str, "_models.EncryptionStatus"],
+ cosmos_db_resource_id: Optional[str] = None,
+ identity: Optional["_models.IdentityForCmk"] = None,
+ search_account_resource_id: Optional[str] = None,
+ storage_account_resource_id: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
- :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
- :keyword validation_data: Validation data inputs.
- :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :paramtype validation_data_size: float
- :keyword model_settings: Settings used for training the model.
- :paramtype model_settings:
- ~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
- :keyword search_space: Search space for sampling different combinations of models and their
- hyperparameters.
- :paramtype search_space:
- list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
+ :keyword cosmos_db_resource_id: The BYOK Cosmos DB account that the customer brings to store
+ the customer's data with encryption.
+ :paramtype cosmos_db_resource_id: str
+ :keyword identity: Identity to be used with the keyVault.
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.IdentityForCmk
+ :keyword key_vault_properties: KeyVault details to do the encryption. Required.
+ :paramtype key_vault_properties: ~azure.mgmt.machinelearningservices.models.KeyVaultProperties
+ :keyword search_account_resource_id: The BYOK search account that the customer brings to store
+ the customer's data with encryption.
+ :paramtype search_account_resource_id: str
+ :keyword status: Indicates whether or not the encryption is enabled for the workspace.
+ Required. Known values are: "Enabled" and "Disabled".
+ :paramtype status: str or ~azure.mgmt.machinelearningservices.models.EncryptionStatus
+ :keyword storage_account_resource_id: The BYOK storage account that the customer brings to
+ store the customer's data with encryption.
+ :paramtype storage_account_resource_id: str
"""
- super().__init__(
- limit_settings=limit_settings,
- sweep_settings=sweep_settings,
- validation_data=validation_data,
- validation_data_size=validation_data_size,
- **kwargs
- )
- self.model_settings = model_settings
- self.search_space = search_space
+ super().__init__(**kwargs)
+ self.cosmos_db_resource_id = cosmos_db_resource_id
+ self.identity = identity
+ self.key_vault_properties = key_vault_properties
+ self.search_account_resource_id = search_account_resource_id
+ self.status = status
+ self.storage_account_resource_id = storage_account_resource_id
-class ImageClassification(ImageClassificationBase, AutoMLVertical): # pylint: disable=too-many-instance-attributes
- """Image Classification. Multi-class image classification is used when an image is classified with
- only a single label
- from a set of classes - e.g. each image is classified as either an image of a 'cat' or a 'dog'
- or a 'duck'.
+class EncryptionUpdateProperties(_serialization.Model):
+ """EncryptionUpdateProperties.
All required parameters must be populated in order to send to Azure.
- :ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
- "Warning", "Error", and "Critical".
- :vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
- :ivar target_column_name: Target column name: This is prediction values column.
- Also known as label column name in context of classification tasks.
- :vartype target_column_name: str
- :ivar task_type: [Required] Task type for AutoMLJob. Required. Known values are:
- "Classification", "Regression", "Forecasting", "ImageClassification",
- "ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
- "TextClassification", "TextClassificationMultilabel", and "TextNER".
- :vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
- :ivar training_data: [Required] Training data input. Required.
- :vartype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
- :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
- :ivar validation_data: Validation data inputs.
- :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :vartype validation_data_size: float
- :ivar model_settings: Settings used for training the model.
- :vartype model_settings:
- ~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
- :ivar search_space: Search space for sampling different combinations of models and their
- hyperparameters.
- :vartype search_space:
- list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
- :ivar primary_metric: Primary metric to optimize for this task. Known values are:
- "AUCWeighted", "Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted", and
- "PrecisionScoreWeighted".
- :vartype primary_metric: str or
- ~azure.mgmt.machinelearningservices.models.ClassificationPrimaryMetrics
+ :ivar key_vault_properties: Required.
+ :vartype key_vault_properties:
+ ~azure.mgmt.machinelearningservices.models.EncryptionKeyVaultUpdateProperties
"""
_validation = {
- "task_type": {"required": True},
- "training_data": {"required": True},
- "limit_settings": {"required": True},
+ "key_vault_properties": {"required": True},
}
_attribute_map = {
- "log_verbosity": {"key": "logVerbosity", "type": "str"},
- "target_column_name": {"key": "targetColumnName", "type": "str"},
- "task_type": {"key": "taskType", "type": "str"},
- "training_data": {"key": "trainingData", "type": "MLTableJobInput"},
- "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
- "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
- "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
- "validation_data_size": {"key": "validationDataSize", "type": "float"},
- "model_settings": {"key": "modelSettings", "type": "ImageModelSettingsClassification"},
- "search_space": {"key": "searchSpace", "type": "[ImageModelDistributionSettingsClassification]"},
- "primary_metric": {"key": "primaryMetric", "type": "str"},
+ "key_vault_properties": {"key": "keyVaultProperties", "type": "EncryptionKeyVaultUpdateProperties"},
}
- def __init__(
- self,
- *,
- training_data: "_models.MLTableJobInput",
- limit_settings: "_models.ImageLimitSettings",
- log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
- target_column_name: Optional[str] = None,
- sweep_settings: Optional["_models.ImageSweepSettings"] = None,
- validation_data: Optional["_models.MLTableJobInput"] = None,
- validation_data_size: Optional[float] = None,
- model_settings: Optional["_models.ImageModelSettingsClassification"] = None,
- search_space: Optional[List["_models.ImageModelDistributionSettingsClassification"]] = None,
- primary_metric: Optional[Union[str, "_models.ClassificationPrimaryMetrics"]] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, key_vault_properties: "_models.EncryptionKeyVaultUpdateProperties", **kwargs: Any) -> None:
"""
- :keyword log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
- "Warning", "Error", and "Critical".
- :paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
- :keyword target_column_name: Target column name: This is prediction values column.
- Also known as label column name in context of classification tasks.
- :paramtype target_column_name: str
- :keyword training_data: [Required] Training data input. Required.
- :paramtype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
- :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
- :keyword validation_data: Validation data inputs.
- :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :paramtype validation_data_size: float
- :keyword model_settings: Settings used for training the model.
- :paramtype model_settings:
- ~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
- :keyword search_space: Search space for sampling different combinations of models and their
- hyperparameters.
- :paramtype search_space:
- list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
- :keyword primary_metric: Primary metric to optimize for this task. Known values are:
- "AUCWeighted", "Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted", and
- "PrecisionScoreWeighted".
- :paramtype primary_metric: str or
- ~azure.mgmt.machinelearningservices.models.ClassificationPrimaryMetrics
+ :keyword key_vault_properties: Required.
+ :paramtype key_vault_properties:
+ ~azure.mgmt.machinelearningservices.models.EncryptionKeyVaultUpdateProperties
"""
- super().__init__(
- limit_settings=limit_settings,
- sweep_settings=sweep_settings,
- validation_data=validation_data,
- validation_data_size=validation_data_size,
- model_settings=model_settings,
- search_space=search_space,
- log_verbosity=log_verbosity,
- target_column_name=target_column_name,
- training_data=training_data,
- **kwargs
- )
- self.log_verbosity = log_verbosity
- self.target_column_name = target_column_name
- self.task_type: str = "ImageClassification"
- self.training_data = training_data
- self.primary_metric = primary_metric
- self.limit_settings = limit_settings
- self.sweep_settings = sweep_settings
- self.validation_data = validation_data
- self.validation_data_size = validation_data_size
- self.model_settings = model_settings
- self.search_space = search_space
-
+ super().__init__(**kwargs)
+ self.key_vault_properties = key_vault_properties
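# Illustrative sketch (not part of the generated code): building the payload used to update a
# workspace's customer-managed key settings. key_identifier must be a non-empty string (see
# the validation above); the key URL below is a placeholder.
from azure.mgmt.machinelearningservices.models import (
    EncryptionKeyVaultUpdateProperties,
    EncryptionUpdateProperties,
)

encryption_update = EncryptionUpdateProperties(
    key_vault_properties=EncryptionKeyVaultUpdateProperties(
        key_identifier="https://example-vault.vault.azure.net/keys/example-key"
    )
)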
-class ImageClassificationMultilabel(
- ImageClassificationBase, AutoMLVertical
-): # pylint: disable=too-many-instance-attributes
- """Image Classification Multilabel. Multi-label image classification is used when an image could
- have one or more labels
- from a set of labels - e.g. an image could be labeled with both 'cat' and 'dog'.
- All required parameters must be populated in order to send to Azure.
+class Endpoint(_serialization.Model):
+ """Endpoint.
- :ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
- "Warning", "Error", and "Critical".
- :vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
- :ivar target_column_name: Target column name: This is prediction values column.
- Also known as label column name in context of classification tasks.
- :vartype target_column_name: str
- :ivar task_type: [Required] Task type for AutoMLJob. Required. Known values are:
- "Classification", "Regression", "Forecasting", "ImageClassification",
- "ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
- "TextClassification", "TextClassificationMultilabel", and "TextNER".
- :vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
- :ivar training_data: [Required] Training data input. Required.
- :vartype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
- :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
- :ivar validation_data: Validation data inputs.
- :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :vartype validation_data_size: float
- :ivar model_settings: Settings used for training the model.
- :vartype model_settings:
- ~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
- :ivar search_space: Search space for sampling different combinations of models and their
- hyperparameters.
- :vartype search_space:
- list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
- :ivar primary_metric: Primary metric to optimize for this task. Known values are:
- "AUCWeighted", "Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted",
- "PrecisionScoreWeighted", and "IOU".
- :vartype primary_metric: str or
- ~azure.mgmt.machinelearningservices.models.ClassificationMultilabelPrimaryMetrics
+ :ivar protocol: Protocol over which communication happens on this endpoint. Known values
+ are: "tcp", "udp", and "http".
+ :vartype protocol: str or ~azure.mgmt.machinelearningservices.models.Protocol
+ :ivar name: Name of the Endpoint.
+ :vartype name: str
+ :ivar target: Application port inside the container.
+ :vartype target: int
+ :ivar published: Port over which the application is exposed from the container.
+ :vartype published: int
+ :ivar host_ip: Host IP over which the application is exposed from the container.
+ :vartype host_ip: str
"""
- _validation = {
- "task_type": {"required": True},
- "training_data": {"required": True},
- "limit_settings": {"required": True},
- }
-
_attribute_map = {
- "log_verbosity": {"key": "logVerbosity", "type": "str"},
- "target_column_name": {"key": "targetColumnName", "type": "str"},
- "task_type": {"key": "taskType", "type": "str"},
- "training_data": {"key": "trainingData", "type": "MLTableJobInput"},
- "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
- "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
- "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
- "validation_data_size": {"key": "validationDataSize", "type": "float"},
- "model_settings": {"key": "modelSettings", "type": "ImageModelSettingsClassification"},
- "search_space": {"key": "searchSpace", "type": "[ImageModelDistributionSettingsClassification]"},
- "primary_metric": {"key": "primaryMetric", "type": "str"},
+ "protocol": {"key": "protocol", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "target": {"key": "target", "type": "int"},
+ "published": {"key": "published", "type": "int"},
+ "host_ip": {"key": "hostIp", "type": "str"},
}
def __init__(
self,
*,
- training_data: "_models.MLTableJobInput",
- limit_settings: "_models.ImageLimitSettings",
- log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
- target_column_name: Optional[str] = None,
- sweep_settings: Optional["_models.ImageSweepSettings"] = None,
- validation_data: Optional["_models.MLTableJobInput"] = None,
- validation_data_size: Optional[float] = None,
- model_settings: Optional["_models.ImageModelSettingsClassification"] = None,
- search_space: Optional[List["_models.ImageModelDistributionSettingsClassification"]] = None,
- primary_metric: Optional[Union[str, "_models.ClassificationMultilabelPrimaryMetrics"]] = None,
+ protocol: Union[str, "_models.Protocol"] = "tcp",
+ name: Optional[str] = None,
+ target: Optional[int] = None,
+ published: Optional[int] = None,
+ host_ip: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
- "Warning", "Error", and "Critical".
- :paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
- :keyword target_column_name: Target column name: This is prediction values column.
- Also known as label column name in context of classification tasks.
- :paramtype target_column_name: str
- :keyword training_data: [Required] Training data input. Required.
- :paramtype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
- :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
- :keyword validation_data: Validation data inputs.
- :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :paramtype validation_data_size: float
- :keyword model_settings: Settings used for training the model.
- :paramtype model_settings:
- ~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
- :keyword search_space: Search space for sampling different combinations of models and their
- hyperparameters.
- :paramtype search_space:
- list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
- :keyword primary_metric: Primary metric to optimize for this task. Known values are:
- "AUCWeighted", "Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted",
- "PrecisionScoreWeighted", and "IOU".
- :paramtype primary_metric: str or
- ~azure.mgmt.machinelearningservices.models.ClassificationMultilabelPrimaryMetrics
+ :keyword protocol: Protocol over which communication happens on this endpoint. Known
+ values are: "tcp", "udp", and "http".
+ :paramtype protocol: str or ~azure.mgmt.machinelearningservices.models.Protocol
+ :keyword name: Name of the Endpoint.
+ :paramtype name: str
+ :keyword target: Application port inside the container.
+ :paramtype target: int
+ :keyword published: Port over which the application is exposed from the container.
+ :paramtype published: int
+ :keyword host_ip: Host IP over which the application is exposed from the container.
+ :paramtype host_ip: str
"""
- super().__init__(
- limit_settings=limit_settings,
- sweep_settings=sweep_settings,
- validation_data=validation_data,
- validation_data_size=validation_data_size,
- model_settings=model_settings,
- search_space=search_space,
- log_verbosity=log_verbosity,
- target_column_name=target_column_name,
- training_data=training_data,
- **kwargs
- )
- self.log_verbosity = log_verbosity
- self.target_column_name = target_column_name
- self.task_type: str = "ImageClassificationMultilabel"
- self.training_data = training_data
- self.primary_metric = primary_metric
- self.limit_settings = limit_settings
- self.sweep_settings = sweep_settings
- self.validation_data = validation_data
- self.validation_data_size = validation_data_size
- self.model_settings = model_settings
- self.search_space = search_space
-
+ super().__init__(**kwargs)
+ self.protocol = protocol
+ self.name = name
+ self.target = target
+ self.published = published
+ self.host_ip = host_ip
-class ImageObjectDetectionBase(ImageVertical):
- """ImageObjectDetectionBase.
- All required parameters must be populated in order to send to Azure.
+class EndpointAuthKeys(_serialization.Model):
+ """Keys for endpoint authentication.
- :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
- :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
- :ivar validation_data: Validation data inputs.
- :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :vartype validation_data_size: float
- :ivar model_settings: Settings used for training the model.
- :vartype model_settings:
- ~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
- :ivar search_space: Search space for sampling different combinations of models and their
- hyperparameters.
- :vartype search_space:
- list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
+ :ivar primary_key: The primary key.
+ :vartype primary_key: str
+ :ivar secondary_key: The secondary key.
+ :vartype secondary_key: str
"""
- _validation = {
- "limit_settings": {"required": True},
+ _attribute_map = {
+ "primary_key": {"key": "primaryKey", "type": "str"},
+ "secondary_key": {"key": "secondaryKey", "type": "str"},
}
+ def __init__(
+ self, *, primary_key: Optional[str] = None, secondary_key: Optional[str] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword primary_key: The primary key.
+ :paramtype primary_key: str
+ :keyword secondary_key: The secondary key.
+ :paramtype secondary_key: str
+ """
+ super().__init__(**kwargs)
+ self.primary_key = primary_key
+ self.secondary_key = secondary_key
+
+
+class EndpointAuthToken(_serialization.Model):
+ """Service Token.
+
+ :ivar access_token: Access token for endpoint authentication.
+ :vartype access_token: str
+ :ivar expiry_time_utc: Access token expiry time (UTC).
+ :vartype expiry_time_utc: int
+ :ivar refresh_after_time_utc: Refresh access token after time (UTC).
+ :vartype refresh_after_time_utc: int
+ :ivar token_type: Access token type.
+ :vartype token_type: str
+ """
+
_attribute_map = {
- "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
- "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
- "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
- "validation_data_size": {"key": "validationDataSize", "type": "float"},
- "model_settings": {"key": "modelSettings", "type": "ImageModelSettingsObjectDetection"},
- "search_space": {"key": "searchSpace", "type": "[ImageModelDistributionSettingsObjectDetection]"},
+ "access_token": {"key": "accessToken", "type": "str"},
+ "expiry_time_utc": {"key": "expiryTimeUtc", "type": "int"},
+ "refresh_after_time_utc": {"key": "refreshAfterTimeUtc", "type": "int"},
+ "token_type": {"key": "tokenType", "type": "str"},
}
def __init__(
self,
*,
- limit_settings: "_models.ImageLimitSettings",
- sweep_settings: Optional["_models.ImageSweepSettings"] = None,
- validation_data: Optional["_models.MLTableJobInput"] = None,
- validation_data_size: Optional[float] = None,
- model_settings: Optional["_models.ImageModelSettingsObjectDetection"] = None,
- search_space: Optional[List["_models.ImageModelDistributionSettingsObjectDetection"]] = None,
+ access_token: Optional[str] = None,
+ expiry_time_utc: int = 0,
+ refresh_after_time_utc: int = 0,
+ token_type: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
- :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
- :keyword validation_data: Validation data inputs.
- :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :paramtype validation_data_size: float
- :keyword model_settings: Settings used for training the model.
- :paramtype model_settings:
- ~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
- :keyword search_space: Search space for sampling different combinations of models and their
- hyperparameters.
- :paramtype search_space:
- list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
+ :keyword access_token: Access token for endpoint authentication.
+ :paramtype access_token: str
+ :keyword expiry_time_utc: Access token expiry time (UTC).
+ :paramtype expiry_time_utc: int
+ :keyword refresh_after_time_utc: Refresh access token after time (UTC).
+ :paramtype refresh_after_time_utc: int
+ :keyword token_type: Access token type.
+ :paramtype token_type: str
"""
- super().__init__(
- limit_settings=limit_settings,
- sweep_settings=sweep_settings,
- validation_data=validation_data,
- validation_data_size=validation_data_size,
- **kwargs
- )
- self.model_settings = model_settings
- self.search_space = search_space
+ super().__init__(**kwargs)
+ self.access_token = access_token
+ self.expiry_time_utc = expiry_time_utc
+ self.refresh_after_time_utc = refresh_after_time_utc
+ self.token_type = token_type
-class ImageInstanceSegmentation(
- ImageObjectDetectionBase, AutoMLVertical
-): # pylint: disable=too-many-instance-attributes
- """Image Instance Segmentation. Instance segmentation is used to identify objects in an image at
- the pixel level,
- drawing a polygon around each object in the image.
+class EndpointScheduleAction(ScheduleActionBase):
+ """EndpointScheduleAction.
All required parameters must be populated in order to send to Azure.
- :ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
- "Warning", "Error", and "Critical".
- :vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
- :ivar target_column_name: Target column name: This is prediction values column.
- Also known as label column name in context of classification tasks.
- :vartype target_column_name: str
- :ivar task_type: [Required] Task type for AutoMLJob. Required. Known values are:
- "Classification", "Regression", "Forecasting", "ImageClassification",
- "ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
- "TextClassification", "TextClassificationMultilabel", and "TextNER".
- :vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
- :ivar training_data: [Required] Training data input. Required.
- :vartype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
- :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
- :ivar validation_data: Validation data inputs.
- :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :vartype validation_data_size: float
- :ivar model_settings: Settings used for training the model.
- :vartype model_settings:
- ~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
- :ivar search_space: Search space for sampling different combinations of models and their
- hyperparameters.
- :vartype search_space:
- list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
- :ivar primary_metric: Primary metric to optimize for this task. "MeanAveragePrecision"
- :vartype primary_metric: str or
- ~azure.mgmt.machinelearningservices.models.InstanceSegmentationPrimaryMetrics
+ :ivar action_type: [Required] Specifies the action type of the schedule. Required. Known values
+ are: "CreateJob", "InvokeBatchEndpoint", "ImportData", and "CreateMonitor".
+ :vartype action_type: str or ~azure.mgmt.machinelearningservices.models.ScheduleActionType
+ :ivar endpoint_invocation_definition: [Required] Defines Schedule action definition details.
+ Required.
+ :vartype endpoint_invocation_definition: JSON
"""
_validation = {
- "task_type": {"required": True},
- "training_data": {"required": True},
- "limit_settings": {"required": True},
+ "action_type": {"required": True},
+ "endpoint_invocation_definition": {"required": True},
}
_attribute_map = {
- "log_verbosity": {"key": "logVerbosity", "type": "str"},
- "target_column_name": {"key": "targetColumnName", "type": "str"},
- "task_type": {"key": "taskType", "type": "str"},
- "training_data": {"key": "trainingData", "type": "MLTableJobInput"},
- "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
- "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
- "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
- "validation_data_size": {"key": "validationDataSize", "type": "float"},
- "model_settings": {"key": "modelSettings", "type": "ImageModelSettingsObjectDetection"},
- "search_space": {"key": "searchSpace", "type": "[ImageModelDistributionSettingsObjectDetection]"},
- "primary_metric": {"key": "primaryMetric", "type": "str"},
+ "action_type": {"key": "actionType", "type": "str"},
+ "endpoint_invocation_definition": {"key": "endpointInvocationDefinition", "type": "object"},
}
- def __init__(
- self,
- *,
- training_data: "_models.MLTableJobInput",
- limit_settings: "_models.ImageLimitSettings",
- log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
- target_column_name: Optional[str] = None,
- sweep_settings: Optional["_models.ImageSweepSettings"] = None,
- validation_data: Optional["_models.MLTableJobInput"] = None,
- validation_data_size: Optional[float] = None,
- model_settings: Optional["_models.ImageModelSettingsObjectDetection"] = None,
- search_space: Optional[List["_models.ImageModelDistributionSettingsObjectDetection"]] = None,
- primary_metric: Optional[Union[str, "_models.InstanceSegmentationPrimaryMetrics"]] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, endpoint_invocation_definition: JSON, **kwargs: Any) -> None:
"""
- :keyword log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
- "Warning", "Error", and "Critical".
- :paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
- :keyword target_column_name: Target column name: This is prediction values column.
- Also known as label column name in context of classification tasks.
- :paramtype target_column_name: str
- :keyword training_data: [Required] Training data input. Required.
- :paramtype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
- :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
- :keyword validation_data: Validation data inputs.
- :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :paramtype validation_data_size: float
- :keyword model_settings: Settings used for training the model.
- :paramtype model_settings:
- ~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
- :keyword search_space: Search space for sampling different combinations of models and their
- hyperparameters.
- :paramtype search_space:
- list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
- :keyword primary_metric: Primary metric to optimize for this task. "MeanAveragePrecision"
- :paramtype primary_metric: str or
- ~azure.mgmt.machinelearningservices.models.InstanceSegmentationPrimaryMetrics
+ :keyword endpoint_invocation_definition: [Required] Defines Schedule action definition details.
+ Required.
+ :paramtype endpoint_invocation_definition: JSON
"""
- super().__init__(
- limit_settings=limit_settings,
- sweep_settings=sweep_settings,
- validation_data=validation_data,
- validation_data_size=validation_data_size,
- model_settings=model_settings,
- search_space=search_space,
- log_verbosity=log_verbosity,
- target_column_name=target_column_name,
- training_data=training_data,
- **kwargs
- )
- self.log_verbosity = log_verbosity
- self.target_column_name = target_column_name
- self.task_type: str = "ImageInstanceSegmentation"
- self.training_data = training_data
- self.primary_metric = primary_metric
- self.limit_settings = limit_settings
- self.sweep_settings = sweep_settings
- self.validation_data = validation_data
- self.validation_data_size = validation_data_size
- self.model_settings = model_settings
- self.search_space = search_space
+ super().__init__(**kwargs)
+ self.action_type: str = "InvokeBatchEndpoint"
+ self.endpoint_invocation_definition = endpoint_invocation_definition
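For orientation, here is a minimal usage sketch of the batch-endpoint schedule action added above. The enclosing class name does not appear in this hunk, so `EndpointScheduleAction` is an assumption, and the endpoint/deployment names are placeholders.

```python
# Hedged sketch -- the class name and payload keys are assumptions, not taken from this diff.
from azure.mgmt.machinelearningservices import models

# endpoint_invocation_definition is an untyped JSON payload ("object" on the wire),
# so any JSON-serializable dict is accepted; these keys are illustrative only.
action = models.EndpointScheduleAction(
    endpoint_invocation_definition={
        "endpointName": "my-batch-endpoint",  # placeholder
        "deploymentName": "blue",             # placeholder
    }
)
assert action.action_type == "InvokeBatchEndpoint"  # discriminator set by the constructor
```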
-class ImageLimitSettings(_serialization.Model):
- """Limit settings for the AutoML job.
+class EnvironmentContainer(ProxyResource):
+ """Azure Resource Manager resource envelope.
- :ivar max_concurrent_trials: Maximum number of concurrent AutoML iterations.
- :vartype max_concurrent_trials: int
- :ivar max_trials: Maximum number of AutoML iterations.
- :vartype max_trials: int
- :ivar timeout: AutoML job timeout.
- :vartype timeout: ~datetime.timedelta
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.EnvironmentContainerProperties
"""
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "properties": {"required": True},
+ }
+
_attribute_map = {
- "max_concurrent_trials": {"key": "maxConcurrentTrials", "type": "int"},
- "max_trials": {"key": "maxTrials", "type": "int"},
- "timeout": {"key": "timeout", "type": "duration"},
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "properties": {"key": "properties", "type": "EnvironmentContainerProperties"},
+ }
+
+ def __init__(self, *, properties: "_models.EnvironmentContainerProperties", **kwargs: Any) -> None:
+ """
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties:
+ ~azure.mgmt.machinelearningservices.models.EnvironmentContainerProperties
+ """
+ super().__init__(**kwargs)
+ self.properties = properties
+
+
+class EnvironmentContainerProperties(AssetContainer):
+ """Container for environment specification versions.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar is_archived: Is the asset archived?
+ :vartype is_archived: bool
+ :ivar latest_version: The latest version inside this container.
+ :vartype latest_version: str
+ :ivar next_version: The next auto incremental version.
+ :vartype next_version: str
+ :ivar provisioning_state: Provisioning state for the environment container. Known values are:
+ "Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.AssetProvisioningState
+ """
+
+ _validation = {
+ "latest_version": {"readonly": True},
+ "next_version": {"readonly": True},
+ "provisioning_state": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "is_archived": {"key": "isArchived", "type": "bool"},
+ "latest_version": {"key": "latestVersion", "type": "str"},
+ "next_version": {"key": "nextVersion", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
}
def __init__(
- self, *, max_concurrent_trials: int = 1, max_trials: int = 1, timeout: datetime.timedelta = "P7D", **kwargs: Any
+ self,
+ *,
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ is_archived: bool = False,
+ **kwargs: Any
) -> None:
"""
- :keyword max_concurrent_trials: Maximum number of concurrent AutoML iterations.
- :paramtype max_concurrent_trials: int
- :keyword max_trials: Maximum number of AutoML iterations.
- :paramtype max_trials: int
- :keyword timeout: AutoML job timeout.
- :paramtype timeout: ~datetime.timedelta
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword is_archived: Is the asset archived?
+ :paramtype is_archived: bool
"""
- super().__init__(**kwargs)
- self.max_concurrent_trials = max_concurrent_trials
- self.max_trials = max_trials
- self.timeout = timeout
+ super().__init__(description=description, properties=properties, tags=tags, is_archived=is_archived, **kwargs)
+ self.provisioning_state = None
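As an illustration of how the envelope and its properties model fit together, a small construction sketch follows; the description and tag values are placeholders, and the workspace call that would consume the object is outside this diff.

```python
# Illustrative construction of the models added above; values are placeholders.
from azure.mgmt.machinelearningservices import models

container = models.EnvironmentContainer(
    properties=models.EnvironmentContainerProperties(
        description="Training environments for the vision team",
        tags={"team": "vision"},
        is_archived=False,
    )
)
# latest_version, next_version and provisioning_state are read-only and stay None
# on the client until the service populates them in a response.
print(container.properties.latest_version)  # None before any round trip
```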
-class ImageMetadata(_serialization.Model):
- """Returns metadata about the operating system image for this compute instance.
+class EnvironmentContainerResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of EnvironmentContainer entities.
- :ivar current_image_version: Specifies the current operating system image version this compute
- instance is running on.
- :vartype current_image_version: str
- :ivar latest_image_version: Specifies the latest available operating system image version.
- :vartype latest_image_version: str
- :ivar is_latest_os_image_version: Specifies whether this compute instance is running on the
- latest operating system image.
- :vartype is_latest_os_image_version: bool
+ :ivar next_link: The link to the next page of EnvironmentContainer objects. If null, there are
+ no additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type EnvironmentContainer.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.EnvironmentContainer]
"""
_attribute_map = {
- "current_image_version": {"key": "currentImageVersion", "type": "str"},
- "latest_image_version": {"key": "latestImageVersion", "type": "str"},
- "is_latest_os_image_version": {"key": "isLatestOsImageVersion", "type": "bool"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[EnvironmentContainer]"},
}
def __init__(
self,
*,
- current_image_version: Optional[str] = None,
- latest_image_version: Optional[str] = None,
- is_latest_os_image_version: Optional[bool] = None,
+ next_link: Optional[str] = None,
+ value: Optional[List["_models.EnvironmentContainer"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword current_image_version: Specifies the current operating system image version this
- compute instance is running on.
- :paramtype current_image_version: str
- :keyword latest_image_version: Specifies the latest available operating system image version.
- :paramtype latest_image_version: str
- :keyword is_latest_os_image_version: Specifies whether this compute instance is running on the
- latest operating system image.
- :paramtype is_latest_os_image_version: bool
+ :keyword next_link: The link to the next page of EnvironmentContainer objects. If null, there
+ are no additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type EnvironmentContainer.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.EnvironmentContainer]
"""
super().__init__(**kwargs)
- self.current_image_version = current_image_version
- self.latest_image_version = latest_image_version
- self.is_latest_os_image_version = is_latest_os_image_version
+ self.next_link = next_link
+ self.value = value
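In practice this paginated envelope is not built by hand; the generated list operations return a pager that follows next_link automatically. A hedged sketch, assuming the environment_containers operation group and placeholder resource names:

```python
# Sketch only: list environment containers and let the client page through
# EnvironmentContainerResourceArmPaginatedResult transparently.
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")
for container in client.environment_containers.list(
    resource_group_name="my-rg",       # placeholder
    workspace_name="my-workspace",     # placeholder
):
    print(container.name, container.properties.latest_version)
```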
-class ImageModelDistributionSettings(_serialization.Model): # pylint: disable=too-many-instance-attributes
- """Distribution expressions to sweep over values of model settings.
+class EnvironmentVariable(_serialization.Model):
+ """EnvironmentVariable.
- :code:`
- Some examples are:
- ```
- ModelName = "choice('seresnext', 'resnest50')";
- LearningRate = "uniform(0.001, 0.01)";
- LayersToFreeze = "choice(0, 2)";
- ````
- All distributions can be specified as distribution_name(min, max) or choice(val1, val2, ...,
- valn)
- where distribution name can be: uniform, quniform, loguniform, etc
- For more details on how to compose distribution expressions please check the documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters
- For more information on the available settings please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
-
- :ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
- :vartype ams_gradient: str
- :ivar augmentations: Settings for using Augmentations.
- :vartype augmentations: str
- :ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
- [0, 1].
- :vartype beta1: str
- :ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
- [0, 1].
- :vartype beta2: str
- :ivar distributed: Whether to use distributer training.
- :vartype distributed: str
- :ivar early_stopping: Enable early stopping logic during training.
- :vartype early_stopping: str
- :ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
- primary metric improvement
- is tracked for early stopping. Must be a positive integer.
- :vartype early_stopping_delay: str
- :ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
- primary metric improvement before
- the run is stopped. Must be a positive integer.
- :vartype early_stopping_patience: str
- :ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
- :vartype enable_onnx_normalization: str
- :ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
- be a positive integer.
- :vartype evaluation_frequency: str
- :ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
- "GradAccumulationStep" steps without
- updating the model weights while accumulating the gradients of those steps, and then using
- the accumulated gradients to compute the weight updates. Must be a positive integer.
- :vartype gradient_accumulation_step: str
- :ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
- For instance, passing 2 as value for 'seresnext' means
- freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
- please
- see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :vartype layers_to_freeze: str
- :ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
- :vartype learning_rate: str
- :ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
- 'step'.
- :vartype learning_rate_scheduler: str
- :ivar model_name: Name of the model to use for training.
- For more information on the available models please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :vartype model_name: str
- :ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
- :vartype momentum: str
- :ivar nesterov: Enable nesterov when optimizer is 'sgd'.
- :vartype nesterov: str
- :ivar number_of_epochs: Number of training epochs. Must be a positive integer.
- :vartype number_of_epochs: str
- :ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
- :vartype number_of_workers: str
- :ivar optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
- :vartype optimizer: str
- :ivar random_seed: Random seed to be used when using deterministic training.
- :vartype random_seed: str
- :ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
- the range [0, 1].
- :vartype step_lr_gamma: str
- :ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
- positive integer.
- :vartype step_lr_step_size: str
- :ivar training_batch_size: Training batch size. Must be a positive integer.
- :vartype training_batch_size: str
- :ivar validation_batch_size: Validation batch size. Must be a positive integer.
- :vartype validation_batch_size: str
- :ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
- 'warmup_cosine'. Must be a float in the range [0, 1].
- :vartype warmup_cosine_lr_cycles: str
- :ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
- 'warmup_cosine'. Must be a positive integer.
- :vartype warmup_cosine_lr_warmup_epochs: str
- :ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
- a float in the range[0, 1].
- :vartype weight_decay: str
+ :ivar additional_properties: Unmatched properties from the message are deserialized to this
+ collection.
+ :vartype additional_properties: dict[str, any]
+ :ivar type: Type of the Environment Variable. Possible values are: local - For local variable.
+ "local"
+ :vartype type: str or ~azure.mgmt.machinelearningservices.models.EnvironmentVariableType
+ :ivar value: Value of the Environment variable.
+ :vartype value: str
"""
_attribute_map = {
- "ams_gradient": {"key": "amsGradient", "type": "str"},
- "augmentations": {"key": "augmentations", "type": "str"},
- "beta1": {"key": "beta1", "type": "str"},
- "beta2": {"key": "beta2", "type": "str"},
- "distributed": {"key": "distributed", "type": "str"},
- "early_stopping": {"key": "earlyStopping", "type": "str"},
- "early_stopping_delay": {"key": "earlyStoppingDelay", "type": "str"},
- "early_stopping_patience": {"key": "earlyStoppingPatience", "type": "str"},
- "enable_onnx_normalization": {"key": "enableOnnxNormalization", "type": "str"},
- "evaluation_frequency": {"key": "evaluationFrequency", "type": "str"},
- "gradient_accumulation_step": {"key": "gradientAccumulationStep", "type": "str"},
- "layers_to_freeze": {"key": "layersToFreeze", "type": "str"},
- "learning_rate": {"key": "learningRate", "type": "str"},
- "learning_rate_scheduler": {"key": "learningRateScheduler", "type": "str"},
- "model_name": {"key": "modelName", "type": "str"},
- "momentum": {"key": "momentum", "type": "str"},
- "nesterov": {"key": "nesterov", "type": "str"},
- "number_of_epochs": {"key": "numberOfEpochs", "type": "str"},
- "number_of_workers": {"key": "numberOfWorkers", "type": "str"},
- "optimizer": {"key": "optimizer", "type": "str"},
- "random_seed": {"key": "randomSeed", "type": "str"},
- "step_lr_gamma": {"key": "stepLRGamma", "type": "str"},
- "step_lr_step_size": {"key": "stepLRStepSize", "type": "str"},
- "training_batch_size": {"key": "trainingBatchSize", "type": "str"},
- "validation_batch_size": {"key": "validationBatchSize", "type": "str"},
- "warmup_cosine_lr_cycles": {"key": "warmupCosineLRCycles", "type": "str"},
- "warmup_cosine_lr_warmup_epochs": {"key": "warmupCosineLRWarmupEpochs", "type": "str"},
- "weight_decay": {"key": "weightDecay", "type": "str"},
+ "additional_properties": {"key": "", "type": "{object}"},
+ "type": {"key": "type", "type": "str"},
+ "value": {"key": "value", "type": "str"},
}
- def __init__( # pylint: disable=too-many-locals
+ def __init__(
self,
*,
- ams_gradient: Optional[str] = None,
- augmentations: Optional[str] = None,
- beta1: Optional[str] = None,
- beta2: Optional[str] = None,
- distributed: Optional[str] = None,
- early_stopping: Optional[str] = None,
- early_stopping_delay: Optional[str] = None,
- early_stopping_patience: Optional[str] = None,
- enable_onnx_normalization: Optional[str] = None,
- evaluation_frequency: Optional[str] = None,
- gradient_accumulation_step: Optional[str] = None,
- layers_to_freeze: Optional[str] = None,
- learning_rate: Optional[str] = None,
- learning_rate_scheduler: Optional[str] = None,
- model_name: Optional[str] = None,
- momentum: Optional[str] = None,
- nesterov: Optional[str] = None,
- number_of_epochs: Optional[str] = None,
- number_of_workers: Optional[str] = None,
- optimizer: Optional[str] = None,
- random_seed: Optional[str] = None,
- step_lr_gamma: Optional[str] = None,
- step_lr_step_size: Optional[str] = None,
- training_batch_size: Optional[str] = None,
- validation_batch_size: Optional[str] = None,
- warmup_cosine_lr_cycles: Optional[str] = None,
- warmup_cosine_lr_warmup_epochs: Optional[str] = None,
- weight_decay: Optional[str] = None,
+ additional_properties: Optional[Dict[str, Any]] = None,
+ type: Union[str, "_models.EnvironmentVariableType"] = "local",
+ value: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
- :paramtype ams_gradient: str
- :keyword augmentations: Settings for using Augmentations.
- :paramtype augmentations: str
- :keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
- range [0, 1].
- :paramtype beta1: str
- :keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
- range [0, 1].
- :paramtype beta2: str
- :keyword distributed: Whether to use distributer training.
- :paramtype distributed: str
- :keyword early_stopping: Enable early stopping logic during training.
- :paramtype early_stopping: str
- :keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
- before primary metric improvement
- is tracked for early stopping. Must be a positive integer.
- :paramtype early_stopping_delay: str
- :keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
- primary metric improvement before
- the run is stopped. Must be a positive integer.
- :paramtype early_stopping_patience: str
- :keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
- :paramtype enable_onnx_normalization: str
- :keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
- Must be a positive integer.
- :paramtype evaluation_frequency: str
- :keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
- "GradAccumulationStep" steps without
- updating the model weights while accumulating the gradients of those steps, and then using
- the accumulated gradients to compute the weight updates. Must be a positive integer.
- :paramtype gradient_accumulation_step: str
- :keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
- integer.
- For instance, passing 2 as value for 'seresnext' means
- freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
- please
- see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :paramtype layers_to_freeze: str
- :keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
- :paramtype learning_rate: str
- :keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
- 'step'.
- :paramtype learning_rate_scheduler: str
- :keyword model_name: Name of the model to use for training.
- For more information on the available models please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :paramtype model_name: str
- :keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
- 1].
- :paramtype momentum: str
- :keyword nesterov: Enable nesterov when optimizer is 'sgd'.
- :paramtype nesterov: str
- :keyword number_of_epochs: Number of training epochs. Must be a positive integer.
- :paramtype number_of_epochs: str
- :keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
- :paramtype number_of_workers: str
- :keyword optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
- :paramtype optimizer: str
- :keyword random_seed: Random seed to be used when using deterministic training.
- :paramtype random_seed: str
- :keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
- in the range [0, 1].
- :paramtype step_lr_gamma: str
- :keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
- a positive integer.
- :paramtype step_lr_step_size: str
- :keyword training_batch_size: Training batch size. Must be a positive integer.
- :paramtype training_batch_size: str
- :keyword validation_batch_size: Validation batch size. Must be a positive integer.
- :paramtype validation_batch_size: str
- :keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
- 'warmup_cosine'. Must be a float in the range [0, 1].
- :paramtype warmup_cosine_lr_cycles: str
- :keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
- 'warmup_cosine'. Must be a positive integer.
- :paramtype warmup_cosine_lr_warmup_epochs: str
- :keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
- be a float in the range[0, 1].
- :paramtype weight_decay: str
+ :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ collection.
+ :paramtype additional_properties: dict[str, any]
+ :keyword type: Type of the Environment Variable. Possible values are: local - For local
+ variable. "local"
+ :paramtype type: str or ~azure.mgmt.machinelearningservices.models.EnvironmentVariableType
+ :keyword value: Value of the Environment variable.
+ :paramtype value: str
"""
super().__init__(**kwargs)
- self.ams_gradient = ams_gradient
- self.augmentations = augmentations
- self.beta1 = beta1
- self.beta2 = beta2
- self.distributed = distributed
- self.early_stopping = early_stopping
- self.early_stopping_delay = early_stopping_delay
- self.early_stopping_patience = early_stopping_patience
- self.enable_onnx_normalization = enable_onnx_normalization
- self.evaluation_frequency = evaluation_frequency
- self.gradient_accumulation_step = gradient_accumulation_step
- self.layers_to_freeze = layers_to_freeze
- self.learning_rate = learning_rate
- self.learning_rate_scheduler = learning_rate_scheduler
- self.model_name = model_name
- self.momentum = momentum
- self.nesterov = nesterov
- self.number_of_epochs = number_of_epochs
- self.number_of_workers = number_of_workers
- self.optimizer = optimizer
- self.random_seed = random_seed
- self.step_lr_gamma = step_lr_gamma
- self.step_lr_step_size = step_lr_step_size
- self.training_batch_size = training_batch_size
- self.validation_batch_size = validation_batch_size
- self.warmup_cosine_lr_cycles = warmup_cosine_lr_cycles
- self.warmup_cosine_lr_warmup_epochs = warmup_cosine_lr_warmup_epochs
- self.weight_decay = weight_decay
+ self.additional_properties = additional_properties
+ self.type = type
+ self.value = value
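A short sketch of the EnvironmentVariable model above; the surrounding definition that would carry it (for example, a deployment or packaging payload) is not part of this hunk, so only the variable itself is shown.

```python
# Minimal sketch; the value is a placeholder.
from azure.mgmt.machinelearningservices import models

env_var = models.EnvironmentVariable(type="local", value="1")
# additional_properties collects any JSON keys the serializer does not recognize,
# which keeps round-tripping forward-compatible payloads lossless.
```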
-class ImageModelDistributionSettingsClassification(
- ImageModelDistributionSettings
-): # pylint: disable=too-many-instance-attributes
- """Distribution expressions to sweep over values of model settings.
+class EnvironmentVersion(ProxyResource):
+ """Azure Resource Manager resource envelope.
- :code:`
- Some examples are:
- ```
- ModelName = "choice('seresnext', 'resnest50')";
- LearningRate = "uniform(0.001, 0.01)";
- LayersToFreeze = "choice(0, 2)";
- ````
- For more details on how to compose distribution expressions please check the documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters
- For more information on the available settings please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ Variables are only populated by the server, and will be ignored when sending a request.
- :ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
- :vartype ams_gradient: str
- :ivar augmentations: Settings for using Augmentations.
- :vartype augmentations: str
- :ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
- [0, 1].
- :vartype beta1: str
- :ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
- [0, 1].
- :vartype beta2: str
- :ivar distributed: Whether to use distributer training.
- :vartype distributed: str
- :ivar early_stopping: Enable early stopping logic during training.
- :vartype early_stopping: str
- :ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
- primary metric improvement
- is tracked for early stopping. Must be a positive integer.
- :vartype early_stopping_delay: str
- :ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
- primary metric improvement before
- the run is stopped. Must be a positive integer.
- :vartype early_stopping_patience: str
- :ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
- :vartype enable_onnx_normalization: str
- :ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
- be a positive integer.
- :vartype evaluation_frequency: str
- :ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
- "GradAccumulationStep" steps without
- updating the model weights while accumulating the gradients of those steps, and then using
- the accumulated gradients to compute the weight updates. Must be a positive integer.
- :vartype gradient_accumulation_step: str
- :ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
- For instance, passing 2 as value for 'seresnext' means
- freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
- please
- see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :vartype layers_to_freeze: str
- :ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
- :vartype learning_rate: str
- :ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
- 'step'.
- :vartype learning_rate_scheduler: str
- :ivar model_name: Name of the model to use for training.
- For more information on the available models please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :vartype model_name: str
- :ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
- :vartype momentum: str
- :ivar nesterov: Enable nesterov when optimizer is 'sgd'.
- :vartype nesterov: str
- :ivar number_of_epochs: Number of training epochs. Must be a positive integer.
- :vartype number_of_epochs: str
- :ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
- :vartype number_of_workers: str
- :ivar optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
- :vartype optimizer: str
- :ivar random_seed: Random seed to be used when using deterministic training.
- :vartype random_seed: str
- :ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
- the range [0, 1].
- :vartype step_lr_gamma: str
- :ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
- positive integer.
- :vartype step_lr_step_size: str
- :ivar training_batch_size: Training batch size. Must be a positive integer.
- :vartype training_batch_size: str
- :ivar validation_batch_size: Validation batch size. Must be a positive integer.
- :vartype validation_batch_size: str
- :ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
- 'warmup_cosine'. Must be a float in the range [0, 1].
- :vartype warmup_cosine_lr_cycles: str
- :ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
- 'warmup_cosine'. Must be a positive integer.
- :vartype warmup_cosine_lr_warmup_epochs: str
- :ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
- a float in the range[0, 1].
- :vartype weight_decay: str
- :ivar training_crop_size: Image crop size that is input to the neural network for the training
- dataset. Must be a positive integer.
- :vartype training_crop_size: str
- :ivar validation_crop_size: Image crop size that is input to the neural network for the
- validation dataset. Must be a positive integer.
- :vartype validation_crop_size: str
- :ivar validation_resize_size: Image size to which to resize before cropping for validation
- dataset. Must be a positive integer.
- :vartype validation_resize_size: str
- :ivar weighted_loss: Weighted loss. The accepted values are 0 for no weighted loss.
- 1 for weighted loss with sqrt.(class_weights). 2 for weighted loss with class_weights. Must be
- 0 or 1 or 2.
- :vartype weighted_loss: str
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.EnvironmentVersionProperties
"""
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "properties": {"required": True},
+ }
+
_attribute_map = {
- "ams_gradient": {"key": "amsGradient", "type": "str"},
- "augmentations": {"key": "augmentations", "type": "str"},
- "beta1": {"key": "beta1", "type": "str"},
- "beta2": {"key": "beta2", "type": "str"},
- "distributed": {"key": "distributed", "type": "str"},
- "early_stopping": {"key": "earlyStopping", "type": "str"},
- "early_stopping_delay": {"key": "earlyStoppingDelay", "type": "str"},
- "early_stopping_patience": {"key": "earlyStoppingPatience", "type": "str"},
- "enable_onnx_normalization": {"key": "enableOnnxNormalization", "type": "str"},
- "evaluation_frequency": {"key": "evaluationFrequency", "type": "str"},
- "gradient_accumulation_step": {"key": "gradientAccumulationStep", "type": "str"},
- "layers_to_freeze": {"key": "layersToFreeze", "type": "str"},
- "learning_rate": {"key": "learningRate", "type": "str"},
- "learning_rate_scheduler": {"key": "learningRateScheduler", "type": "str"},
- "model_name": {"key": "modelName", "type": "str"},
- "momentum": {"key": "momentum", "type": "str"},
- "nesterov": {"key": "nesterov", "type": "str"},
- "number_of_epochs": {"key": "numberOfEpochs", "type": "str"},
- "number_of_workers": {"key": "numberOfWorkers", "type": "str"},
- "optimizer": {"key": "optimizer", "type": "str"},
- "random_seed": {"key": "randomSeed", "type": "str"},
- "step_lr_gamma": {"key": "stepLRGamma", "type": "str"},
- "step_lr_step_size": {"key": "stepLRStepSize", "type": "str"},
- "training_batch_size": {"key": "trainingBatchSize", "type": "str"},
- "validation_batch_size": {"key": "validationBatchSize", "type": "str"},
- "warmup_cosine_lr_cycles": {"key": "warmupCosineLRCycles", "type": "str"},
- "warmup_cosine_lr_warmup_epochs": {"key": "warmupCosineLRWarmupEpochs", "type": "str"},
- "weight_decay": {"key": "weightDecay", "type": "str"},
- "training_crop_size": {"key": "trainingCropSize", "type": "str"},
- "validation_crop_size": {"key": "validationCropSize", "type": "str"},
- "validation_resize_size": {"key": "validationResizeSize", "type": "str"},
- "weighted_loss": {"key": "weightedLoss", "type": "str"},
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "properties": {"key": "properties", "type": "EnvironmentVersionProperties"},
}
- def __init__( # pylint: disable=too-many-locals
- self,
- *,
- ams_gradient: Optional[str] = None,
- augmentations: Optional[str] = None,
- beta1: Optional[str] = None,
- beta2: Optional[str] = None,
- distributed: Optional[str] = None,
- early_stopping: Optional[str] = None,
- early_stopping_delay: Optional[str] = None,
- early_stopping_patience: Optional[str] = None,
- enable_onnx_normalization: Optional[str] = None,
- evaluation_frequency: Optional[str] = None,
- gradient_accumulation_step: Optional[str] = None,
- layers_to_freeze: Optional[str] = None,
- learning_rate: Optional[str] = None,
- learning_rate_scheduler: Optional[str] = None,
- model_name: Optional[str] = None,
- momentum: Optional[str] = None,
- nesterov: Optional[str] = None,
- number_of_epochs: Optional[str] = None,
- number_of_workers: Optional[str] = None,
- optimizer: Optional[str] = None,
- random_seed: Optional[str] = None,
- step_lr_gamma: Optional[str] = None,
- step_lr_step_size: Optional[str] = None,
- training_batch_size: Optional[str] = None,
- validation_batch_size: Optional[str] = None,
- warmup_cosine_lr_cycles: Optional[str] = None,
- warmup_cosine_lr_warmup_epochs: Optional[str] = None,
- weight_decay: Optional[str] = None,
- training_crop_size: Optional[str] = None,
- validation_crop_size: Optional[str] = None,
- validation_resize_size: Optional[str] = None,
- weighted_loss: Optional[str] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, properties: "_models.EnvironmentVersionProperties", **kwargs: Any) -> None:
"""
- :keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
- :paramtype ams_gradient: str
- :keyword augmentations: Settings for using Augmentations.
- :paramtype augmentations: str
- :keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
- range [0, 1].
- :paramtype beta1: str
- :keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
- range [0, 1].
- :paramtype beta2: str
- :keyword distributed: Whether to use distributer training.
- :paramtype distributed: str
- :keyword early_stopping: Enable early stopping logic during training.
- :paramtype early_stopping: str
- :keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
- before primary metric improvement
- is tracked for early stopping. Must be a positive integer.
- :paramtype early_stopping_delay: str
- :keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
- primary metric improvement before
- the run is stopped. Must be a positive integer.
- :paramtype early_stopping_patience: str
- :keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
- :paramtype enable_onnx_normalization: str
- :keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
- Must be a positive integer.
- :paramtype evaluation_frequency: str
- :keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
- "GradAccumulationStep" steps without
- updating the model weights while accumulating the gradients of those steps, and then using
- the accumulated gradients to compute the weight updates. Must be a positive integer.
- :paramtype gradient_accumulation_step: str
- :keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
- integer.
- For instance, passing 2 as value for 'seresnext' means
- freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
- please
- see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :paramtype layers_to_freeze: str
- :keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
- :paramtype learning_rate: str
- :keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
- 'step'.
- :paramtype learning_rate_scheduler: str
- :keyword model_name: Name of the model to use for training.
- For more information on the available models please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :paramtype model_name: str
- :keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
- 1].
- :paramtype momentum: str
- :keyword nesterov: Enable nesterov when optimizer is 'sgd'.
- :paramtype nesterov: str
- :keyword number_of_epochs: Number of training epochs. Must be a positive integer.
- :paramtype number_of_epochs: str
- :keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
- :paramtype number_of_workers: str
- :keyword optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
- :paramtype optimizer: str
- :keyword random_seed: Random seed to be used when using deterministic training.
- :paramtype random_seed: str
- :keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
- in the range [0, 1].
- :paramtype step_lr_gamma: str
- :keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
- a positive integer.
- :paramtype step_lr_step_size: str
- :keyword training_batch_size: Training batch size. Must be a positive integer.
- :paramtype training_batch_size: str
- :keyword validation_batch_size: Validation batch size. Must be a positive integer.
- :paramtype validation_batch_size: str
- :keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
- 'warmup_cosine'. Must be a float in the range [0, 1].
- :paramtype warmup_cosine_lr_cycles: str
- :keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
- 'warmup_cosine'. Must be a positive integer.
- :paramtype warmup_cosine_lr_warmup_epochs: str
- :keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
- be a float in the range[0, 1].
- :paramtype weight_decay: str
- :keyword training_crop_size: Image crop size that is input to the neural network for the
- training dataset. Must be a positive integer.
- :paramtype training_crop_size: str
- :keyword validation_crop_size: Image crop size that is input to the neural network for the
- validation dataset. Must be a positive integer.
- :paramtype validation_crop_size: str
- :keyword validation_resize_size: Image size to which to resize before cropping for validation
- dataset. Must be a positive integer.
- :paramtype validation_resize_size: str
- :keyword weighted_loss: Weighted loss. The accepted values are 0 for no weighted loss.
- 1 for weighted loss with sqrt.(class_weights). 2 for weighted loss with class_weights. Must be
- 0 or 1 or 2.
- :paramtype weighted_loss: str
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.EnvironmentVersionProperties
+ """
+ super().__init__(**kwargs)
+ self.properties = properties
+
+
+class EnvironmentVersionProperties(AssetBase): # pylint: disable=too-many-instance-attributes
+ """Environment version details.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar auto_delete_setting: Specifies the lifecycle setting of managed data asset.
+ :vartype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :ivar is_anonymous: Whether the name and version are system generated (anonymous
+  registration). For types where Stage is defined, when Stage is provided it will be used to
+  populate IsAnonymous.
+ :vartype is_anonymous: bool
+ :ivar is_archived: Is the asset archived? For types where Stage is defined, when Stage is
+ provided it will be used to populate IsArchived.
+ :vartype is_archived: bool
+ :ivar auto_rebuild: Defines if image needs to be rebuilt based on base image changes. Known
+ values are: "Disabled" and "OnBaseImageUpdate".
+ :vartype auto_rebuild: str or ~azure.mgmt.machinelearningservices.models.AutoRebuildSetting
+ :ivar build: Configuration settings for Docker build context.
+ :vartype build: ~azure.mgmt.machinelearningservices.models.BuildContext
+ :ivar conda_file: Standard configuration file used by Conda that lets you install any kind of
+  package, including Python, R, and C/C++ packages.
+ :vartype conda_file: str
+ :ivar environment_type: Environment type is either user managed or curated by the Azure ML
+  service. Known values are: "Curated" and "UserCreated".
+ :vartype environment_type: str or ~azure.mgmt.machinelearningservices.models.EnvironmentType
+ :ivar image: Name of the image that will be used for the environment.
+ :vartype image: str
+ :ivar inference_config: Defines configuration specific to inference.
+ :vartype inference_config:
+ ~azure.mgmt.machinelearningservices.models.InferenceContainerProperties
+ :ivar intellectual_property: Intellectual Property details. Used if environment is an
+ Intellectual Property.
+ :vartype intellectual_property: ~azure.mgmt.machinelearningservices.models.IntellectualProperty
+ :ivar os_type: The OS type of the environment. Known values are: "Linux" and "Windows".
+ :vartype os_type: str or ~azure.mgmt.machinelearningservices.models.OperatingSystemType
+ :ivar provisioning_state: Provisioning state for the environment version. Known values are:
+ "Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.AssetProvisioningState
+ :ivar stage: Stage in the environment lifecycle assigned to this environment.
+ :vartype stage: str
+ """
+
+ _validation = {
+ "environment_type": {"readonly": True},
+ "provisioning_state": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "auto_delete_setting": {"key": "autoDeleteSetting", "type": "AutoDeleteSetting"},
+ "is_anonymous": {"key": "isAnonymous", "type": "bool"},
+ "is_archived": {"key": "isArchived", "type": "bool"},
+ "auto_rebuild": {"key": "autoRebuild", "type": "str"},
+ "build": {"key": "build", "type": "BuildContext"},
+ "conda_file": {"key": "condaFile", "type": "str"},
+ "environment_type": {"key": "environmentType", "type": "str"},
+ "image": {"key": "image", "type": "str"},
+ "inference_config": {"key": "inferenceConfig", "type": "InferenceContainerProperties"},
+ "intellectual_property": {"key": "intellectualProperty", "type": "IntellectualProperty"},
+ "os_type": {"key": "osType", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "stage": {"key": "stage", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ auto_delete_setting: Optional["_models.AutoDeleteSetting"] = None,
+ is_anonymous: bool = False,
+ is_archived: bool = False,
+ auto_rebuild: Optional[Union[str, "_models.AutoRebuildSetting"]] = None,
+ build: Optional["_models.BuildContext"] = None,
+ conda_file: Optional[str] = None,
+ image: Optional[str] = None,
+ inference_config: Optional["_models.InferenceContainerProperties"] = None,
+ intellectual_property: Optional["_models.IntellectualProperty"] = None,
+ os_type: Optional[Union[str, "_models.OperatingSystemType"]] = None,
+ stage: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword auto_delete_setting: Specifies the lifecycle setting of managed data asset.
+ :paramtype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :keyword is_anonymous: Whether the name and version are system generated (anonymous
+  registration). For types where Stage is defined, when Stage is provided it will be used to
+  populate IsAnonymous.
+ :paramtype is_anonymous: bool
+ :keyword is_archived: Is the asset archived? For types where Stage is defined, when Stage is
+ provided it will be used to populate IsArchived.
+ :paramtype is_archived: bool
+ :keyword auto_rebuild: Defines if image needs to be rebuilt based on base image changes. Known
+ values are: "Disabled" and "OnBaseImageUpdate".
+ :paramtype auto_rebuild: str or ~azure.mgmt.machinelearningservices.models.AutoRebuildSetting
+ :keyword build: Configuration settings for Docker build context.
+ :paramtype build: ~azure.mgmt.machinelearningservices.models.BuildContext
+ :keyword conda_file: Standard configuration file used by Conda that lets you install any kind
+  of package, including Python, R, and C/C++ packages.
+ :paramtype conda_file: str
+ :keyword image: Name of the image that will be used for the environment.
+ :paramtype image: str
+ :keyword inference_config: Defines configuration specific to inference.
+ :paramtype inference_config:
+ ~azure.mgmt.machinelearningservices.models.InferenceContainerProperties
+ :keyword intellectual_property: Intellectual Property details. Used if environment is an
+ Intellectual Property.
+ :paramtype intellectual_property:
+ ~azure.mgmt.machinelearningservices.models.IntellectualProperty
+ :keyword os_type: The OS type of the environment. Known values are: "Linux" and "Windows".
+ :paramtype os_type: str or ~azure.mgmt.machinelearningservices.models.OperatingSystemType
+ :keyword stage: Stage in the environment lifecycle assigned to this environment.
+ :paramtype stage: str
"""
super().__init__(
- ams_gradient=ams_gradient,
- augmentations=augmentations,
- beta1=beta1,
- beta2=beta2,
- distributed=distributed,
- early_stopping=early_stopping,
- early_stopping_delay=early_stopping_delay,
- early_stopping_patience=early_stopping_patience,
- enable_onnx_normalization=enable_onnx_normalization,
- evaluation_frequency=evaluation_frequency,
- gradient_accumulation_step=gradient_accumulation_step,
- layers_to_freeze=layers_to_freeze,
- learning_rate=learning_rate,
- learning_rate_scheduler=learning_rate_scheduler,
- model_name=model_name,
- momentum=momentum,
- nesterov=nesterov,
- number_of_epochs=number_of_epochs,
- number_of_workers=number_of_workers,
- optimizer=optimizer,
- random_seed=random_seed,
- step_lr_gamma=step_lr_gamma,
- step_lr_step_size=step_lr_step_size,
- training_batch_size=training_batch_size,
- validation_batch_size=validation_batch_size,
- warmup_cosine_lr_cycles=warmup_cosine_lr_cycles,
- warmup_cosine_lr_warmup_epochs=warmup_cosine_lr_warmup_epochs,
- weight_decay=weight_decay,
+ description=description,
+ properties=properties,
+ tags=tags,
+ auto_delete_setting=auto_delete_setting,
+ is_anonymous=is_anonymous,
+ is_archived=is_archived,
**kwargs
)
- self.training_crop_size = training_crop_size
- self.validation_crop_size = validation_crop_size
- self.validation_resize_size = validation_resize_size
- self.weighted_loss = weighted_loss
+ self.auto_rebuild = auto_rebuild
+ self.build = build
+ self.conda_file = conda_file
+ self.environment_type = None
+ self.image = image
+ self.inference_config = inference_config
+ self.intellectual_property = intellectual_property
+ self.os_type = os_type
+ self.provisioning_state = None
+ self.stage = stage
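Putting the last two models together, here is a hedged sketch of defining an environment version from a base image plus an inline conda file; the image reference and YAML content are illustrative only.

```python
# Sketch only: the image name and conda YAML below are placeholders, not taken from this diff.
from azure.mgmt.machinelearningservices import models

conda_yaml = """\
name: example-env
dependencies:
  - python=3.10
  - pip:
      - numpy
"""

env_version = models.EnvironmentVersion(
    properties=models.EnvironmentVersionProperties(
        image="mcr.microsoft.com/azureml/openmpi4.1.0-ubuntu20.04",  # placeholder image
        conda_file=conda_yaml,
        os_type="Linux",
        tags={"purpose": "example"},
    )
)
# environment_type and provisioning_state are read-only and remain None until
# the service returns the resource.
```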
-class ImageModelDistributionSettingsObjectDetection(
- ImageModelDistributionSettings
-): # pylint: disable=too-many-instance-attributes
- """Distribution expressions to sweep over values of model settings.
+class EnvironmentVersionResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of EnvironmentVersion entities.
- :code:`
- Some examples are:
- ```
- ModelName = "choice('seresnext', 'resnest50')";
- LearningRate = "uniform(0.001, 0.01)";
- LayersToFreeze = "choice(0, 2)";
- ````
- For more details on how to compose distribution expressions please check the documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters
- For more information on the available settings please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
-
- :ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
- :vartype ams_gradient: str
- :ivar augmentations: Settings for using Augmentations.
- :vartype augmentations: str
- :ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
- [0, 1].
- :vartype beta1: str
- :ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
- [0, 1].
- :vartype beta2: str
- :ivar distributed: Whether to use distributer training.
- :vartype distributed: str
- :ivar early_stopping: Enable early stopping logic during training.
- :vartype early_stopping: str
- :ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
- primary metric improvement
- is tracked for early stopping. Must be a positive integer.
- :vartype early_stopping_delay: str
- :ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
- primary metric improvement before
- the run is stopped. Must be a positive integer.
- :vartype early_stopping_patience: str
- :ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
- :vartype enable_onnx_normalization: str
- :ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
- be a positive integer.
- :vartype evaluation_frequency: str
- :ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
- "GradAccumulationStep" steps without
- updating the model weights while accumulating the gradients of those steps, and then using
- the accumulated gradients to compute the weight updates. Must be a positive integer.
- :vartype gradient_accumulation_step: str
- :ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
- For instance, passing 2 as value for 'seresnext' means
- freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
- please
- see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :vartype layers_to_freeze: str
- :ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
- :vartype learning_rate: str
- :ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
- 'step'.
- :vartype learning_rate_scheduler: str
- :ivar model_name: Name of the model to use for training.
- For more information on the available models please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :vartype model_name: str
- :ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
- :vartype momentum: str
- :ivar nesterov: Enable nesterov when optimizer is 'sgd'.
- :vartype nesterov: str
- :ivar number_of_epochs: Number of training epochs. Must be a positive integer.
- :vartype number_of_epochs: str
- :ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
- :vartype number_of_workers: str
- :ivar optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
- :vartype optimizer: str
- :ivar random_seed: Random seed to be used when using deterministic training.
- :vartype random_seed: str
- :ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
- the range [0, 1].
- :vartype step_lr_gamma: str
- :ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
- positive integer.
- :vartype step_lr_step_size: str
- :ivar training_batch_size: Training batch size. Must be a positive integer.
- :vartype training_batch_size: str
- :ivar validation_batch_size: Validation batch size. Must be a positive integer.
- :vartype validation_batch_size: str
- :ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
- 'warmup_cosine'. Must be a float in the range [0, 1].
- :vartype warmup_cosine_lr_cycles: str
- :ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
- 'warmup_cosine'. Must be a positive integer.
- :vartype warmup_cosine_lr_warmup_epochs: str
- :ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
- a float in the range[0, 1].
- :vartype weight_decay: str
- :ivar box_detections_per_image: Maximum number of detections per image, for all classes. Must
- be a positive integer.
- Note: This settings is not supported for the 'yolov5' algorithm.
- :vartype box_detections_per_image: str
- :ivar box_score_threshold: During inference, only return proposals with a classification score
- greater than
- BoxScoreThreshold. Must be a float in the range[0, 1].
- :vartype box_score_threshold: str
- :ivar image_size: Image size for train and validation. Must be a positive integer.
- Note: The training run may get into CUDA OOM if the size is too big.
- Note: This settings is only supported for the 'yolov5' algorithm.
- :vartype image_size: str
- :ivar max_size: Maximum size of the image to be rescaled before feeding it to the backbone.
- Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
- Note: This settings is not supported for the 'yolov5' algorithm.
- :vartype max_size: str
- :ivar min_size: Minimum size of the image to be rescaled before feeding it to the backbone.
- Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
- Note: This settings is not supported for the 'yolov5' algorithm.
- :vartype min_size: str
- :ivar model_size: Model size. Must be 'small', 'medium', 'large', or 'xlarge'.
- Note: training run may get into CUDA OOM if the model size is too big.
- Note: This settings is only supported for the 'yolov5' algorithm.
- :vartype model_size: str
- :ivar multi_scale: Enable multi-scale image by varying image size by +/- 50%.
- Note: training run may get into CUDA OOM if no sufficient GPU memory.
- Note: This settings is only supported for the 'yolov5' algorithm.
- :vartype multi_scale: str
- :ivar nms_iou_threshold: IOU threshold used during inference in NMS post processing. Must be
- float in the range [0, 1].
- :vartype nms_iou_threshold: str
- :ivar tile_grid_size: The grid size to use for tiling each image. Note: TileGridSize must not
- be
- None to enable small object detection logic. A string containing two integers in mxn format.
- Note: This settings is not supported for the 'yolov5' algorithm.
- :vartype tile_grid_size: str
- :ivar tile_overlap_ratio: Overlap ratio between adjacent tiles in each dimension. Must be float
- in the range [0, 1).
- Note: This settings is not supported for the 'yolov5' algorithm.
- :vartype tile_overlap_ratio: str
- :ivar tile_predictions_nms_threshold: The IOU threshold to use to perform NMS while merging
- predictions from tiles and image.
- Used in validation/ inference. Must be float in the range [0, 1].
- Note: This settings is not supported for the 'yolov5' algorithm.
- NMS: Non-maximum suppression.
- :vartype tile_predictions_nms_threshold: str
- :ivar validation_iou_threshold: IOU threshold to use when computing validation metric. Must be
- float in the range [0, 1].
- :vartype validation_iou_threshold: str
- :ivar validation_metric_type: Metric computation method to use for validation metrics. Must be
- 'none', 'coco', 'voc', or 'coco_voc'.
- :vartype validation_metric_type: str
+ :ivar next_link: The link to the next page of EnvironmentVersion objects. If null, there are no
+ additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type EnvironmentVersion.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.EnvironmentVersion]
"""
_attribute_map = {
- "ams_gradient": {"key": "amsGradient", "type": "str"},
- "augmentations": {"key": "augmentations", "type": "str"},
- "beta1": {"key": "beta1", "type": "str"},
- "beta2": {"key": "beta2", "type": "str"},
- "distributed": {"key": "distributed", "type": "str"},
- "early_stopping": {"key": "earlyStopping", "type": "str"},
- "early_stopping_delay": {"key": "earlyStoppingDelay", "type": "str"},
- "early_stopping_patience": {"key": "earlyStoppingPatience", "type": "str"},
- "enable_onnx_normalization": {"key": "enableOnnxNormalization", "type": "str"},
- "evaluation_frequency": {"key": "evaluationFrequency", "type": "str"},
- "gradient_accumulation_step": {"key": "gradientAccumulationStep", "type": "str"},
- "layers_to_freeze": {"key": "layersToFreeze", "type": "str"},
- "learning_rate": {"key": "learningRate", "type": "str"},
- "learning_rate_scheduler": {"key": "learningRateScheduler", "type": "str"},
- "model_name": {"key": "modelName", "type": "str"},
- "momentum": {"key": "momentum", "type": "str"},
- "nesterov": {"key": "nesterov", "type": "str"},
- "number_of_epochs": {"key": "numberOfEpochs", "type": "str"},
- "number_of_workers": {"key": "numberOfWorkers", "type": "str"},
- "optimizer": {"key": "optimizer", "type": "str"},
- "random_seed": {"key": "randomSeed", "type": "str"},
- "step_lr_gamma": {"key": "stepLRGamma", "type": "str"},
- "step_lr_step_size": {"key": "stepLRStepSize", "type": "str"},
- "training_batch_size": {"key": "trainingBatchSize", "type": "str"},
- "validation_batch_size": {"key": "validationBatchSize", "type": "str"},
- "warmup_cosine_lr_cycles": {"key": "warmupCosineLRCycles", "type": "str"},
- "warmup_cosine_lr_warmup_epochs": {"key": "warmupCosineLRWarmupEpochs", "type": "str"},
- "weight_decay": {"key": "weightDecay", "type": "str"},
- "box_detections_per_image": {"key": "boxDetectionsPerImage", "type": "str"},
- "box_score_threshold": {"key": "boxScoreThreshold", "type": "str"},
- "image_size": {"key": "imageSize", "type": "str"},
- "max_size": {"key": "maxSize", "type": "str"},
- "min_size": {"key": "minSize", "type": "str"},
- "model_size": {"key": "modelSize", "type": "str"},
- "multi_scale": {"key": "multiScale", "type": "str"},
- "nms_iou_threshold": {"key": "nmsIouThreshold", "type": "str"},
- "tile_grid_size": {"key": "tileGridSize", "type": "str"},
- "tile_overlap_ratio": {"key": "tileOverlapRatio", "type": "str"},
- "tile_predictions_nms_threshold": {"key": "tilePredictionsNmsThreshold", "type": "str"},
- "validation_iou_threshold": {"key": "validationIouThreshold", "type": "str"},
- "validation_metric_type": {"key": "validationMetricType", "type": "str"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[EnvironmentVersion]"},
}
- def __init__( # pylint: disable=too-many-locals
+ def __init__(
self,
*,
- ams_gradient: Optional[str] = None,
- augmentations: Optional[str] = None,
- beta1: Optional[str] = None,
- beta2: Optional[str] = None,
- distributed: Optional[str] = None,
- early_stopping: Optional[str] = None,
- early_stopping_delay: Optional[str] = None,
- early_stopping_patience: Optional[str] = None,
- enable_onnx_normalization: Optional[str] = None,
- evaluation_frequency: Optional[str] = None,
- gradient_accumulation_step: Optional[str] = None,
- layers_to_freeze: Optional[str] = None,
- learning_rate: Optional[str] = None,
- learning_rate_scheduler: Optional[str] = None,
- model_name: Optional[str] = None,
- momentum: Optional[str] = None,
- nesterov: Optional[str] = None,
- number_of_epochs: Optional[str] = None,
- number_of_workers: Optional[str] = None,
- optimizer: Optional[str] = None,
- random_seed: Optional[str] = None,
- step_lr_gamma: Optional[str] = None,
- step_lr_step_size: Optional[str] = None,
- training_batch_size: Optional[str] = None,
- validation_batch_size: Optional[str] = None,
- warmup_cosine_lr_cycles: Optional[str] = None,
- warmup_cosine_lr_warmup_epochs: Optional[str] = None,
- weight_decay: Optional[str] = None,
- box_detections_per_image: Optional[str] = None,
- box_score_threshold: Optional[str] = None,
- image_size: Optional[str] = None,
- max_size: Optional[str] = None,
- min_size: Optional[str] = None,
- model_size: Optional[str] = None,
- multi_scale: Optional[str] = None,
- nms_iou_threshold: Optional[str] = None,
- tile_grid_size: Optional[str] = None,
- tile_overlap_ratio: Optional[str] = None,
- tile_predictions_nms_threshold: Optional[str] = None,
- validation_iou_threshold: Optional[str] = None,
- validation_metric_type: Optional[str] = None,
+ next_link: Optional[str] = None,
+ value: Optional[List["_models.EnvironmentVersion"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
- :paramtype ams_gradient: str
- :keyword augmentations: Settings for using Augmentations.
- :paramtype augmentations: str
- :keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
- range [0, 1].
- :paramtype beta1: str
- :keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
- range [0, 1].
- :paramtype beta2: str
- :keyword distributed: Whether to use distributer training.
- :paramtype distributed: str
- :keyword early_stopping: Enable early stopping logic during training.
- :paramtype early_stopping: str
- :keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
- before primary metric improvement
- is tracked for early stopping. Must be a positive integer.
- :paramtype early_stopping_delay: str
- :keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
- primary metric improvement before
- the run is stopped. Must be a positive integer.
+ :keyword next_link: The link to the next page of EnvironmentVersion objects. If null, there are
+ no additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type EnvironmentVersion.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.EnvironmentVersion]
+ """
+ super().__init__(**kwargs)
+ self.next_link = next_link
+ self.value = value
+
+
+class ErrorAdditionalInfo(_serialization.Model):
+ """The resource management error additional info.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar type: The additional info type.
+ :vartype type: str
+ :ivar info: The additional info.
+ :vartype info: JSON
+ """
+
+ _validation = {
+ "type": {"readonly": True},
+ "info": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "type": {"key": "type", "type": "str"},
+ "info": {"key": "info", "type": "object"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.type = None
+ self.info = None
+
+
+class ErrorDetail(_serialization.Model):
+ """The error detail.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar code: The error code.
+ :vartype code: str
+ :ivar message: The error message.
+ :vartype message: str
+ :ivar target: The error target.
+ :vartype target: str
+ :ivar details: The error details.
+ :vartype details: list[~azure.mgmt.machinelearningservices.models.ErrorDetail]
+ :ivar additional_info: The error additional info.
+ :vartype additional_info: list[~azure.mgmt.machinelearningservices.models.ErrorAdditionalInfo]
+ """
+
+ _validation = {
+ "code": {"readonly": True},
+ "message": {"readonly": True},
+ "target": {"readonly": True},
+ "details": {"readonly": True},
+ "additional_info": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "code": {"key": "code", "type": "str"},
+ "message": {"key": "message", "type": "str"},
+ "target": {"key": "target", "type": "str"},
+ "details": {"key": "details", "type": "[ErrorDetail]"},
+ "additional_info": {"key": "additionalInfo", "type": "[ErrorAdditionalInfo]"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.code = None
+ self.message = None
+ self.target = None
+ self.details = None
+ self.additional_info = None
+
+
+class ErrorResponse(_serialization.Model):
+ """Common error response for all Azure Resource Manager APIs to return error details for failed
+ operations. (This also follows the OData error response format.)
+
+ :ivar error: The error object.
+ :vartype error: ~azure.mgmt.machinelearningservices.models.ErrorDetail
+ """
+
+ _attribute_map = {
+ "error": {"key": "error", "type": "ErrorDetail"},
+ }
+
+ def __init__(self, *, error: Optional["_models.ErrorDetail"] = None, **kwargs: Any) -> None:
+ """
+ :keyword error: The error object.
+ :paramtype error: ~azure.mgmt.machinelearningservices.models.ErrorDetail
+ """
+ super().__init__(**kwargs)
+ self.error = error
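+
+# Usage sketch (illustrative comment only, not part of the generated client): ErrorDetail and
+# ErrorResponse are normally populated from a failed ARM response rather than built by hand.
+# Assuming the msrest-style ``Model.from_dict`` helper provided by this package's
+# _serialization module, a raw error payload (the keys below are hypothetical) could be
+# rehydrated and inspected like this:
+#
+#     from azure.mgmt.machinelearningservices import models
+#
+#     body = {"error": {"code": "ResourceNotFound", "message": "Workspace not found."}}
+#     err = models.ErrorResponse.from_dict(body)
+#     if err.error:
+#         print(err.error.code, err.error.message)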
+
+
+class EstimatedVMPrice(_serialization.Model):
+ """The estimated price info for using a VM of a particular OS type, tier, etc.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar retail_price: The price charged for using the VM. Required.
+ :vartype retail_price: float
+ :ivar os_type: Operating system type used by the VM. Required. Known values are: "Linux" and
+ "Windows".
+ :vartype os_type: str or ~azure.mgmt.machinelearningservices.models.VMPriceOSType
+ :ivar vm_tier: The type of the VM. Required. Known values are: "Standard", "LowPriority", and
+ "Spot".
+ :vartype vm_tier: str or ~azure.mgmt.machinelearningservices.models.VMTier
+ """
+
+ _validation = {
+ "retail_price": {"required": True},
+ "os_type": {"required": True},
+ "vm_tier": {"required": True},
+ }
+
+ _attribute_map = {
+ "retail_price": {"key": "retailPrice", "type": "float"},
+ "os_type": {"key": "osType", "type": "str"},
+ "vm_tier": {"key": "vmTier", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ retail_price: float,
+ os_type: Union[str, "_models.VMPriceOSType"],
+ vm_tier: Union[str, "_models.VMTier"],
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword retail_price: The price charged for using the VM. Required.
+ :paramtype retail_price: float
+ :keyword os_type: Operating system type used by the VM. Required. Known values are: "Linux" and
+ "Windows".
+ :paramtype os_type: str or ~azure.mgmt.machinelearningservices.models.VMPriceOSType
+ :keyword vm_tier: The type of the VM. Required. Known values are: "Standard", "LowPriority",
+ and "Spot".
+ :paramtype vm_tier: str or ~azure.mgmt.machinelearningservices.models.VMTier
+ """
+ super().__init__(**kwargs)
+ self.retail_price = retail_price
+ self.os_type = os_type
+ self.vm_tier = vm_tier
+
+
+class EstimatedVMPrices(_serialization.Model):
+ """The estimated price info for using a VM.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar billing_currency: Three-letter code specifying the currency of the VM price. Example:
+ USD. Required. "USD"
+ :vartype billing_currency: str or ~azure.mgmt.machinelearningservices.models.BillingCurrency
+ :ivar unit_of_measure: The unit of time measurement for the specified VM price. Example:
+ OneHour. Required. "OneHour"
+ :vartype unit_of_measure: str or ~azure.mgmt.machinelearningservices.models.UnitOfMeasure
+ :ivar values: The list of estimated prices for using a VM of a particular OS type, tier, etc.
+ Required.
+ :vartype values: list[~azure.mgmt.machinelearningservices.models.EstimatedVMPrice]
+ """
+
+ _validation = {
+ "billing_currency": {"required": True},
+ "unit_of_measure": {"required": True},
+ "values": {"required": True},
+ }
+
+ _attribute_map = {
+ "billing_currency": {"key": "billingCurrency", "type": "str"},
+ "unit_of_measure": {"key": "unitOfMeasure", "type": "str"},
+ "values": {"key": "values", "type": "[EstimatedVMPrice]"},
+ }
+
+ def __init__(
+ self,
+ *,
+ billing_currency: Union[str, "_models.BillingCurrency"],
+ unit_of_measure: Union[str, "_models.UnitOfMeasure"],
+ values: List["_models.EstimatedVMPrice"],
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword billing_currency: Three-letter code specifying the currency of the VM price.
+ Example: USD. Required. "USD"
+ :paramtype billing_currency: str or ~azure.mgmt.machinelearningservices.models.BillingCurrency
+ :keyword unit_of_measure: The unit of time measurement for the specified VM price. Example:
+ OneHour. Required. "OneHour"
+ :paramtype unit_of_measure: str or ~azure.mgmt.machinelearningservices.models.UnitOfMeasure
+ :keyword values: The list of estimated prices for using a VM of a particular OS type, tier,
+ etc. Required.
+ :paramtype values: list[~azure.mgmt.machinelearningservices.models.EstimatedVMPrice]
+ """
+ super().__init__(**kwargs)
+ self.billing_currency = billing_currency
+ self.unit_of_measure = unit_of_measure
+ self.values = values
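+
+# Usage sketch (illustrative comment only): EstimatedVMPrices is typically returned by the
+# service, but constructing it locally shows the expected shape. All keywords come from the
+# constructors above; plain strings are accepted wherever a Union[str, <enum>] type is
+# declared, and the sample prices are made up.
+#
+#     from azure.mgmt.machinelearningservices import models
+#
+#     prices = models.EstimatedVMPrices(
+#         billing_currency="USD",
+#         unit_of_measure="OneHour",
+#         values=[
+#             models.EstimatedVMPrice(retail_price=0.92, os_type="Linux", vm_tier="Standard"),
+#             models.EstimatedVMPrice(retail_price=0.18, os_type="Linux", vm_tier="Spot"),
+#         ],
+#     )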
+
+
+class ExternalFQDNResponse(_serialization.Model):
+ """ExternalFQDNResponse.
+
+ :ivar value:
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.FQDNEndpointsPropertyBag]
+ """
+
+ _attribute_map = {
+ "value": {"key": "value", "type": "[FQDNEndpointsPropertyBag]"},
+ }
+
+ def __init__(self, *, value: Optional[List["_models.FQDNEndpointsPropertyBag"]] = None, **kwargs: Any) -> None:
+ """
+ :keyword value:
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.FQDNEndpointsPropertyBag]
+ """
+ super().__init__(**kwargs)
+ self.value = value
+
+
+class Feature(ProxyResource):
+ """Azure Resource Manager resource envelope.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.FeatureProperties
+ """
+
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "properties": {"required": True},
+ }
+
+ _attribute_map = {
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "properties": {"key": "properties", "type": "FeatureProperties"},
+ }
+
+ def __init__(self, *, properties: "_models.FeatureProperties", **kwargs: Any) -> None:
+ """
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.FeatureProperties
+ """
+ super().__init__(**kwargs)
+ self.properties = properties
+
+
+class FeatureAttributionDriftMonitoringSignal(MonitoringSignalBase):
+ """FeatureAttributionDriftMonitoringSignal.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar notification_types: The current notification mode for this signal.
+ :vartype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
+ :vartype properties: dict[str, str]
+ :ivar signal_type: [Required] Specifies the type of signal to monitor. Required. Known values
+ are: "DataDrift", "PredictionDrift", "DataQuality", "FeatureAttributionDrift", "Custom",
+ "ModelPerformance", "GenerationSafetyQuality", and "GenerationTokenStatistics".
+ :vartype signal_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringSignalType
+ :ivar feature_data_type_override: A dictionary that maps feature names to their respective data
+ types.
+ :vartype feature_data_type_override: dict[str, str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType]
+ :ivar feature_importance_settings: The settings for computing feature importance.
+ :vartype feature_importance_settings:
+ ~azure.mgmt.machinelearningservices.models.FeatureImportanceSettings
+ :ivar metric_threshold: [Required] A list of metrics to calculate and their associated
+ thresholds. Required.
+ :vartype metric_threshold:
+ ~azure.mgmt.machinelearningservices.models.FeatureAttributionMetricThreshold
+ :ivar production_data: [Required] The data for which drift will be calculated. Required.
+ :vartype production_data:
+ list[~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase]
+ :ivar reference_data: [Required] The data to calculate drift against. Required.
+ :vartype reference_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
+ """
+
+ _validation = {
+ "signal_type": {"required": True},
+ "metric_threshold": {"required": True},
+ "production_data": {"required": True},
+ "reference_data": {"required": True},
+ }
+
+ _attribute_map = {
+ "notification_types": {"key": "notificationTypes", "type": "[str]"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "signal_type": {"key": "signalType", "type": "str"},
+ "feature_data_type_override": {"key": "featureDataTypeOverride", "type": "{str}"},
+ "feature_importance_settings": {"key": "featureImportanceSettings", "type": "FeatureImportanceSettings"},
+ "metric_threshold": {"key": "metricThreshold", "type": "FeatureAttributionMetricThreshold"},
+ "production_data": {"key": "productionData", "type": "[MonitoringInputDataBase]"},
+ "reference_data": {"key": "referenceData", "type": "MonitoringInputDataBase"},
+ }
+
+ def __init__(
+ self,
+ *,
+ metric_threshold: "_models.FeatureAttributionMetricThreshold",
+ production_data: List["_models.MonitoringInputDataBase"],
+ reference_data: "_models.MonitoringInputDataBase",
+ notification_types: Optional[List[Union[str, "_models.MonitoringNotificationType"]]] = None,
+ properties: Optional[Dict[str, str]] = None,
+ feature_data_type_override: Optional[Dict[str, Union[str, "_models.MonitoringFeatureDataType"]]] = None,
+ feature_importance_settings: Optional["_models.FeatureImportanceSettings"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword notification_types: The current notification mode for this signal.
+ :paramtype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
+ :paramtype properties: dict[str, str]
+ :keyword feature_data_type_override: A dictionary that maps feature names to their respective
+ data types.
+ :paramtype feature_data_type_override: dict[str, str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType]
+ :keyword feature_importance_settings: The settings for computing feature importance.
+ :paramtype feature_importance_settings:
+ ~azure.mgmt.machinelearningservices.models.FeatureImportanceSettings
+ :keyword metric_threshold: [Required] A list of metrics to calculate and their associated
+ thresholds. Required.
+ :paramtype metric_threshold:
+ ~azure.mgmt.machinelearningservices.models.FeatureAttributionMetricThreshold
+ :keyword production_data: [Required] The data for which drift will be calculated. Required.
+ :paramtype production_data:
+ list[~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase]
+ :keyword reference_data: [Required] The data to calculate drift against. Required.
+ :paramtype reference_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
+ """
+ super().__init__(notification_types=notification_types, properties=properties, **kwargs)
+ self.signal_type: str = "FeatureAttributionDrift"
+ self.feature_data_type_override = feature_data_type_override
+ self.feature_importance_settings = feature_importance_settings
+ self.metric_threshold = metric_threshold
+ self.production_data = production_data
+ self.reference_data = reference_data
+
+
+class FeatureAttributionMetricThreshold(_serialization.Model):
+ """FeatureAttributionMetricThreshold.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar metric: [Required] The feature attribution metric to calculate. Required.
+ "NormalizedDiscountedCumulativeGain"
+ :vartype metric: str or ~azure.mgmt.machinelearningservices.models.FeatureAttributionMetric
+ :ivar threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ """
+
+ _validation = {
+ "metric": {"required": True},
+ }
+
+ _attribute_map = {
+ "metric": {"key": "metric", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
+ }
+
+ def __init__(
+ self,
+ *,
+ metric: Union[str, "_models.FeatureAttributionMetric"],
+ threshold: Optional["_models.MonitoringThreshold"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword metric: [Required] The feature attribution metric to calculate. Required.
+ "NormalizedDiscountedCumulativeGain"
+ :paramtype metric: str or ~azure.mgmt.machinelearningservices.models.FeatureAttributionMetric
+ :keyword threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ """
+ super().__init__(**kwargs)
+ self.metric = metric
+ self.threshold = threshold
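+
+# Usage sketch (illustrative comment only): building the metric threshold consumed by a
+# FeatureAttributionDriftMonitoringSignal. The MonitoringThreshold constructor is not part of
+# this hunk, so its ``value`` keyword below is an assumption.
+#
+#     from azure.mgmt.machinelearningservices import models
+#
+#     metric_threshold = models.FeatureAttributionMetricThreshold(
+#         metric="NormalizedDiscountedCumulativeGain",
+#         threshold=models.MonitoringThreshold(value=0.9),  # assumed keyword
+#     )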
+
+
+class FeatureImportanceSettings(_serialization.Model):
+ """FeatureImportanceSettings.
+
+ :ivar mode: The mode of operation for computing feature importance. Known values are:
+ "Disabled" and "Enabled".
+ :vartype mode: str or ~azure.mgmt.machinelearningservices.models.FeatureImportanceMode
+ :ivar target_column: The name of the target column within the input data asset.
+ :vartype target_column: str
+ """
+
+ _attribute_map = {
+ "mode": {"key": "mode", "type": "str"},
+ "target_column": {"key": "targetColumn", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ mode: Optional[Union[str, "_models.FeatureImportanceMode"]] = None,
+ target_column: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword mode: The mode of operation for computing feature importance. Known values are:
+ "Disabled" and "Enabled".
+ :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.FeatureImportanceMode
+ :keyword target_column: The name of the target column within the input data asset.
+ :paramtype target_column: str
+ """
+ super().__init__(**kwargs)
+ self.mode = mode
+ self.target_column = target_column
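+
+# Usage sketch (illustrative comment only): enabling feature importance computation for a
+# monitoring signal; the target column name is hypothetical.
+#
+#     from azure.mgmt.machinelearningservices import models
+#
+#     importance = models.FeatureImportanceSettings(mode="Enabled", target_column="is_fraud")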
+
+
+class FeatureProperties(ResourceBase):
+ """Dto object representing feature.
+
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar data_type: Specifies type. Known values are: "String", "Integer", "Long", "Float",
+ "Double", "Binary", "Datetime", and "Boolean".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.FeatureDataType
+ :ivar feature_name: Specifies name.
+ :vartype feature_name: str
+ """
+
+ _attribute_map = {
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "data_type": {"key": "dataType", "type": "str"},
+ "feature_name": {"key": "featureName", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ data_type: Optional[Union[str, "_models.FeatureDataType"]] = None,
+ feature_name: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword data_type: Specifies type. Known values are: "String", "Integer", "Long", "Float",
+ "Double", "Binary", "Datetime", and "Boolean".
+ :paramtype data_type: str or ~azure.mgmt.machinelearningservices.models.FeatureDataType
+ :keyword feature_name: Specifies name.
+ :paramtype feature_name: str
+ """
+ super().__init__(description=description, properties=properties, tags=tags, **kwargs)
+ self.data_type = data_type
+ self.feature_name = feature_name
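+
+# Usage sketch (illustrative comment only): wrapping FeatureProperties in the Feature ARM
+# envelope defined above; the feature name, data type, and description are hypothetical.
+#
+#     from azure.mgmt.machinelearningservices import models
+#
+#     feature = models.Feature(
+#         properties=models.FeatureProperties(
+#             feature_name="transaction_amount",
+#             data_type="Double",
+#             description="Rolling 7-day transaction amount.",
+#         )
+#     )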
+
+
+class FeatureResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of Feature entities.
+
+ :ivar next_link: The link to the next page of Feature objects. If null, there are no additional
+ pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type Feature.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.Feature]
+ """
+
+ _attribute_map = {
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[Feature]"},
+ }
+
+ def __init__(
+ self, *, next_link: Optional[str] = None, value: Optional[List["_models.Feature"]] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword next_link: The link to the next page of Feature objects. If null, there are no
+ additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type Feature.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.Feature]
+ """
+ super().__init__(**kwargs)
+ self.next_link = next_link
+ self.value = value
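+
+# Usage sketch (illustrative comment only): the *ResourceArmPaginatedResult models in this
+# module are wire formats; callers normally never construct or page through them by hand.
+# Assuming the standard generated operations surface (operation-group, method, and parameter
+# names below are assumptions), the list methods return an ItemPaged iterator that follows
+# next_link automatically:
+#
+#     for feature in client.features.list(
+#         resource_group_name="my-rg",
+#         workspace_name="my-ws",
+#         featureset_name="transactions",
+#         featureset_version="1",
+#     ):
+#         print(feature.name)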
+
+
+class FeaturesetContainer(ProxyResource):
+ """Azure Resource Manager resource envelope.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.FeaturesetContainerProperties
+ """
+
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "properties": {"required": True},
+ }
+
+ _attribute_map = {
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "properties": {"key": "properties", "type": "FeaturesetContainerProperties"},
+ }
+
+ def __init__(self, *, properties: "_models.FeaturesetContainerProperties", **kwargs: Any) -> None:
+ """
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.FeaturesetContainerProperties
+ """
+ super().__init__(**kwargs)
+ self.properties = properties
+
+
+class FeaturesetContainerProperties(AssetContainer):
+ """Dto object representing feature set.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar is_archived: Is the asset archived?
+ :vartype is_archived: bool
+ :ivar latest_version: The latest version inside this container.
+ :vartype latest_version: str
+ :ivar next_version: The next auto incremental version.
+ :vartype next_version: str
+ :ivar provisioning_state: Provisioning state for the featureset container. Known values are:
+ "Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.AssetProvisioningState
+ """
+
+ _validation = {
+ "latest_version": {"readonly": True},
+ "next_version": {"readonly": True},
+ "provisioning_state": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "is_archived": {"key": "isArchived", "type": "bool"},
+ "latest_version": {"key": "latestVersion", "type": "str"},
+ "next_version": {"key": "nextVersion", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ is_archived: bool = False,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword is_archived: Is the asset archived?
+ :paramtype is_archived: bool
+ """
+ super().__init__(description=description, properties=properties, tags=tags, is_archived=is_archived, **kwargs)
+ self.provisioning_state = None
+
+
+class FeaturesetContainerResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of FeaturesetContainer entities.
+
+ :ivar next_link: The link to the next page of FeaturesetContainer objects. If null, there are
+ no additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type FeaturesetContainer.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.FeaturesetContainer]
+ """
+
+ _attribute_map = {
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[FeaturesetContainer]"},
+ }
+
+ def __init__(
+ self,
+ *,
+ next_link: Optional[str] = None,
+ value: Optional[List["_models.FeaturesetContainer"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword next_link: The link to the next page of FeaturesetContainer objects. If null, there
+ are no additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type FeaturesetContainer.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.FeaturesetContainer]
+ """
+ super().__init__(**kwargs)
+ self.next_link = next_link
+ self.value = value
+
+
+class FeaturesetSpecification(_serialization.Model):
+ """Dto object representing specification.
+
+ :ivar path: Specifies the spec path.
+ :vartype path: str
+ """
+
+ _attribute_map = {
+ "path": {"key": "path", "type": "str"},
+ }
+
+ def __init__(self, *, path: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword path: Specifies the spec path.
+ :paramtype path: str
+ """
+ super().__init__(**kwargs)
+ self.path = path
+
+
+class FeaturesetVersion(ProxyResource):
+ """Azure Resource Manager resource envelope.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.FeaturesetVersionProperties
+ """
+
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "properties": {"required": True},
+ }
+
+ _attribute_map = {
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "properties": {"key": "properties", "type": "FeaturesetVersionProperties"},
+ }
+
+ def __init__(self, *, properties: "_models.FeaturesetVersionProperties", **kwargs: Any) -> None:
+ """
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.FeaturesetVersionProperties
+ """
+ super().__init__(**kwargs)
+ self.properties = properties
+
+
+class FeaturesetVersionBackfillRequest(_serialization.Model):
+ """Request payload for creating a backfill request for a given feature set version.
+
+ :ivar data_availability_status: Specifies the data availability status that you want to
+ backfill.
+ :vartype data_availability_status: list[str or
+ ~azure.mgmt.machinelearningservices.models.DataAvailabilityStatus]
+ :ivar description: Specifies description.
+ :vartype description: str
+ :ivar display_name: Specifies the display name.
+ :vartype display_name: str
+ :ivar feature_window: Specifies the backfill feature window to be materialized.
+ :vartype feature_window: ~azure.mgmt.machinelearningservices.models.FeatureWindow
+ :ivar job_id: Specify the jobId to retry the failed materialization.
+ :vartype job_id: str
+ :ivar properties: Specifies the properties.
+ :vartype properties: dict[str, str]
+ :ivar resource: Specifies the compute resource settings.
+ :vartype resource: ~azure.mgmt.machinelearningservices.models.MaterializationComputeResource
+ :ivar spark_configuration: Specifies the spark compute settings.
+ :vartype spark_configuration: dict[str, str]
+ :ivar tags: Specifies the tags.
+ :vartype tags: dict[str, str]
+ """
+
+ _attribute_map = {
+ "data_availability_status": {"key": "dataAvailabilityStatus", "type": "[str]"},
+ "description": {"key": "description", "type": "str"},
+ "display_name": {"key": "displayName", "type": "str"},
+ "feature_window": {"key": "featureWindow", "type": "FeatureWindow"},
+ "job_id": {"key": "jobId", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "resource": {"key": "resource", "type": "MaterializationComputeResource"},
+ "spark_configuration": {"key": "sparkConfiguration", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ }
+
+ def __init__(
+ self,
+ *,
+ data_availability_status: Optional[List[Union[str, "_models.DataAvailabilityStatus"]]] = None,
+ description: Optional[str] = None,
+ display_name: Optional[str] = None,
+ feature_window: Optional["_models.FeatureWindow"] = None,
+ job_id: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ resource: Optional["_models.MaterializationComputeResource"] = None,
+ spark_configuration: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword data_availability_status: Specifies the data availability status that you want to
+ backfill.
+ :paramtype data_availability_status: list[str or
+ ~azure.mgmt.machinelearningservices.models.DataAvailabilityStatus]
+ :keyword description: Specifies description.
+ :paramtype description: str
+ :keyword display_name: Specifies the display name.
+ :paramtype display_name: str
+ :keyword feature_window: Specifies the backfill feature window to be materialized.
+ :paramtype feature_window: ~azure.mgmt.machinelearningservices.models.FeatureWindow
+ :keyword job_id: Specify the jobId to retry the failed materialization.
+ :paramtype job_id: str
+ :keyword properties: Specifies the properties.
+ :paramtype properties: dict[str, str]
+ :keyword resource: Specifies the compute resource settings.
+ :paramtype resource: ~azure.mgmt.machinelearningservices.models.MaterializationComputeResource
+ :keyword spark_configuration: Specifies the spark compute settings.
+ :paramtype spark_configuration: dict[str, str]
+ :keyword tags: Specifies the tags.
+ :paramtype tags: dict[str, str]
+ """
+ super().__init__(**kwargs)
+ self.data_availability_status = data_availability_status
+ self.description = description
+ self.display_name = display_name
+ self.feature_window = feature_window
+ self.job_id = job_id
+ self.properties = properties
+ self.resource = resource
+ self.spark_configuration = spark_configuration
+ self.tags = tags
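+
+# Usage sketch (illustrative comment only): a minimal backfill request. FeatureWindow's
+# constructor is declared later in this module; its feature_window_start/feature_window_end
+# keywords are assumed to match the ivars documented there, and the window dates are made up.
+#
+#     from datetime import datetime, timezone
+#     from azure.mgmt.machinelearningservices import models
+#
+#     backfill = models.FeaturesetVersionBackfillRequest(
+#         display_name="march-backfill",
+#         feature_window=models.FeatureWindow(
+#             feature_window_start=datetime(2024, 3, 1, tzinfo=timezone.utc),
+#             feature_window_end=datetime(2024, 3, 31, tzinfo=timezone.utc),
+#         ),
+#     )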
+
+
+class FeaturesetVersionBackfillResponse(_serialization.Model):
+ """Response payload for creating a backfill request for a given feature set version.
+
+ :ivar job_ids: List of jobs submitted as part of the backfill request.
+ :vartype job_ids: list[str]
+ """
+
+ _attribute_map = {
+ "job_ids": {"key": "jobIds", "type": "[str]"},
+ }
+
+ def __init__(self, *, job_ids: Optional[List[str]] = None, **kwargs: Any) -> None:
+ """
+ :keyword job_ids: List of jobs submitted as part of the backfill request.
+ :paramtype job_ids: list[str]
+ """
+ super().__init__(**kwargs)
+ self.job_ids = job_ids
+
+
+class FeaturesetVersionProperties(AssetBase): # pylint: disable=too-many-instance-attributes
+ """Dto object representing feature set version.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar auto_delete_setting: Specifies the lifecycle setting of a managed data asset.
+ :vartype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :ivar is_anonymous: Whether the name and version are system generated (anonymous registration). For
+ types where Stage is defined, when Stage is provided it will be used to populate IsAnonymous.
+ :vartype is_anonymous: bool
+ :ivar is_archived: Is the asset archived? For types where Stage is defined, when Stage is
+ provided it will be used to populate IsArchived.
+ :vartype is_archived: bool
+ :ivar entities: Specifies list of entities.
+ :vartype entities: list[str]
+ :ivar materialization_settings: Specifies the materialization settings.
+ :vartype materialization_settings:
+ ~azure.mgmt.machinelearningservices.models.MaterializationSettings
+ :ivar provisioning_state: Provisioning state for the featureset version container. Known values
+ are: "Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.AssetProvisioningState
+ :ivar specification: Specifies the feature spec details.
+ :vartype specification: ~azure.mgmt.machinelearningservices.models.FeaturesetSpecification
+ :ivar stage: Specifies the asset stage.
+ :vartype stage: str
+ """
+
+ _validation = {
+ "provisioning_state": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "auto_delete_setting": {"key": "autoDeleteSetting", "type": "AutoDeleteSetting"},
+ "is_anonymous": {"key": "isAnonymous", "type": "bool"},
+ "is_archived": {"key": "isArchived", "type": "bool"},
+ "entities": {"key": "entities", "type": "[str]"},
+ "materialization_settings": {"key": "materializationSettings", "type": "MaterializationSettings"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "specification": {"key": "specification", "type": "FeaturesetSpecification"},
+ "stage": {"key": "stage", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ auto_delete_setting: Optional["_models.AutoDeleteSetting"] = None,
+ is_anonymous: bool = False,
+ is_archived: bool = False,
+ entities: Optional[List[str]] = None,
+ materialization_settings: Optional["_models.MaterializationSettings"] = None,
+ specification: Optional["_models.FeaturesetSpecification"] = None,
+ stage: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword auto_delete_setting: Specifies the lifecycle setting of a managed data asset.
+ :paramtype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :keyword is_anonymous: Whether the name and version are system generated (anonymous registration). For
+ types where Stage is defined, when Stage is provided it will be used to populate IsAnonymous.
+ :paramtype is_anonymous: bool
+ :keyword is_archived: Is the asset archived? For types where Stage is defined, when Stage is
+ provided it will be used to populate IsArchived.
+ :paramtype is_archived: bool
+ :keyword entities: Specifies list of entities.
+ :paramtype entities: list[str]
+ :keyword materialization_settings: Specifies the materialization settings.
+ :paramtype materialization_settings:
+ ~azure.mgmt.machinelearningservices.models.MaterializationSettings
+ :keyword specification: Specifies the feature spec details.
+ :paramtype specification: ~azure.mgmt.machinelearningservices.models.FeaturesetSpecification
+ :keyword stage: Specifies the asset stage.
+ :paramtype stage: str
+ """
+ super().__init__(
+ description=description,
+ properties=properties,
+ tags=tags,
+ auto_delete_setting=auto_delete_setting,
+ is_anonymous=is_anonymous,
+ is_archived=is_archived,
+ **kwargs
+ )
+ self.entities = entities
+ self.materialization_settings = materialization_settings
+ self.provisioning_state = None
+ self.specification = specification
+ self.stage = stage
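+
+# Usage sketch (illustrative comment only): a FeaturesetVersion envelope with its version
+# properties. The specification path, entity reference, and stage value are hypothetical.
+#
+#     from azure.mgmt.machinelearningservices import models
+#
+#     featureset_version = models.FeaturesetVersion(
+#         properties=models.FeaturesetVersionProperties(
+#             description="Transaction aggregates.",
+#             entities=["azureml:account:1"],
+#             specification=models.FeaturesetSpecification(path="./spec"),
+#             stage="Development",
+#         )
+#     )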
+
+
+class FeaturesetVersionResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of FeaturesetVersion entities.
+
+ :ivar next_link: The link to the next page of FeaturesetVersion objects. If null, there are no
+ additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type FeaturesetVersion.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.FeaturesetVersion]
+ """
+
+ _attribute_map = {
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[FeaturesetVersion]"},
+ }
+
+ def __init__(
+ self,
+ *,
+ next_link: Optional[str] = None,
+ value: Optional[List["_models.FeaturesetVersion"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword next_link: The link to the next page of FeaturesetVersion objects. If null, there are
+ no additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type FeaturesetVersion.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.FeaturesetVersion]
+ """
+ super().__init__(**kwargs)
+ self.next_link = next_link
+ self.value = value
+
+
+class FeaturestoreEntityContainer(ProxyResource):
+ """Azure Resource Manager resource envelope.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties:
+ ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainerProperties
+ """
+
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "properties": {"required": True},
+ }
+
+ _attribute_map = {
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "properties": {"key": "properties", "type": "FeaturestoreEntityContainerProperties"},
+ }
+
+ def __init__(self, *, properties: "_models.FeaturestoreEntityContainerProperties", **kwargs: Any) -> None:
+ """
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties:
+ ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainerProperties
+ """
+ super().__init__(**kwargs)
+ self.properties = properties
+
+
+class FeaturestoreEntityContainerProperties(AssetContainer):
+ """Dto object representing feature entity.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar is_archived: Is the asset archived?
+ :vartype is_archived: bool
+ :ivar latest_version: The latest version inside this container.
+ :vartype latest_version: str
+ :ivar next_version: The next auto incremental version.
+ :vartype next_version: str
+ :ivar provisioning_state: Provisioning state for the featurestore entity container. Known
+ values are: "Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.AssetProvisioningState
+ """
+
+ _validation = {
+ "latest_version": {"readonly": True},
+ "next_version": {"readonly": True},
+ "provisioning_state": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "is_archived": {"key": "isArchived", "type": "bool"},
+ "latest_version": {"key": "latestVersion", "type": "str"},
+ "next_version": {"key": "nextVersion", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ is_archived: bool = False,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword is_archived: Is the asset archived?
+ :paramtype is_archived: bool
+ """
+ super().__init__(description=description, properties=properties, tags=tags, is_archived=is_archived, **kwargs)
+ self.provisioning_state = None
+
+
+class FeaturestoreEntityContainerResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of FeaturestoreEntityContainer entities.
+
+ :ivar next_link: The link to the next page of FeaturestoreEntityContainer objects. If null,
+ there are no additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type FeaturestoreEntityContainer.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer]
+ """
+
+ _attribute_map = {
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[FeaturestoreEntityContainer]"},
+ }
+
+ def __init__(
+ self,
+ *,
+ next_link: Optional[str] = None,
+ value: Optional[List["_models.FeaturestoreEntityContainer"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword next_link: The link to the next page of FeaturestoreEntityContainer objects. If null,
+ there are no additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type FeaturestoreEntityContainer.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer]
+ """
+ super().__init__(**kwargs)
+ self.next_link = next_link
+ self.value = value
+
+
+class FeaturestoreEntityVersion(ProxyResource):
+ """Azure Resource Manager resource envelope.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties:
+ ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersionProperties
+ """
+
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "properties": {"required": True},
+ }
+
+ _attribute_map = {
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "properties": {"key": "properties", "type": "FeaturestoreEntityVersionProperties"},
+ }
+
+ def __init__(self, *, properties: "_models.FeaturestoreEntityVersionProperties", **kwargs: Any) -> None:
+ """
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties:
+ ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersionProperties
+ """
+ super().__init__(**kwargs)
+ self.properties = properties
+
+
+class FeaturestoreEntityVersionProperties(AssetBase):
+ """Dto object representing feature entity version.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar auto_delete_setting: Specifies the lifecycle setting of a managed data asset.
+ :vartype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :ivar is_anonymous: Whether the name and version are system generated (anonymous registration). For
+ types where Stage is defined, when Stage is provided it will be used to populate IsAnonymous.
+ :vartype is_anonymous: bool
+ :ivar is_archived: Is the asset archived? For types where Stage is defined, when Stage is
+ provided it will be used to populate IsArchived.
+ :vartype is_archived: bool
+ :ivar index_columns: Specifies index columns.
+ :vartype index_columns: list[~azure.mgmt.machinelearningservices.models.IndexColumn]
+ :ivar provisioning_state: Provisioning state for the featurestore entity version. Known values
+ are: "Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.AssetProvisioningState
+ :ivar stage: Specifies the asset stage.
+ :vartype stage: str
+ """
+
+ _validation = {
+ "provisioning_state": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "auto_delete_setting": {"key": "autoDeleteSetting", "type": "AutoDeleteSetting"},
+ "is_anonymous": {"key": "isAnonymous", "type": "bool"},
+ "is_archived": {"key": "isArchived", "type": "bool"},
+ "index_columns": {"key": "indexColumns", "type": "[IndexColumn]"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "stage": {"key": "stage", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ auto_delete_setting: Optional["_models.AutoDeleteSetting"] = None,
+ is_anonymous: bool = False,
+ is_archived: bool = False,
+ index_columns: Optional[List["_models.IndexColumn"]] = None,
+ stage: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword auto_delete_setting: Specifies the lifecycle setting of a managed data asset.
+ :paramtype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :keyword is_anonymous: Whether the name and version are system generated (anonymous registration). For
+ types where Stage is defined, when Stage is provided it will be used to populate IsAnonymous.
+ :paramtype is_anonymous: bool
+ :keyword is_archived: Is the asset archived? For types where Stage is defined, when Stage is
+ provided it will be used to populate IsArchived.
+ :paramtype is_archived: bool
+ :keyword index_columns: Specifies index columns.
+ :paramtype index_columns: list[~azure.mgmt.machinelearningservices.models.IndexColumn]
+ :keyword stage: Specifies the asset stage.
+ :paramtype stage: str
+ """
+ super().__init__(
+ description=description,
+ properties=properties,
+ tags=tags,
+ auto_delete_setting=auto_delete_setting,
+ is_anonymous=is_anonymous,
+ is_archived=is_archived,
+ **kwargs
+ )
+ self.index_columns = index_columns
+ self.provisioning_state = None
+ self.stage = stage
+
+
+class FeaturestoreEntityVersionResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of FeaturestoreEntityVersion entities.
+
+ :ivar next_link: The link to the next page of FeaturestoreEntityVersion objects. If null, there
+ are no additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type FeaturestoreEntityVersion.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion]
+ """
+
+ _attribute_map = {
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[FeaturestoreEntityVersion]"},
+ }
+
+ def __init__(
+ self,
+ *,
+ next_link: Optional[str] = None,
+ value: Optional[List["_models.FeaturestoreEntityVersion"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword next_link: The link to the next page of FeaturestoreEntityVersion objects. If null,
+ there are no additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type FeaturestoreEntityVersion.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion]
+ """
+ super().__init__(**kwargs)
+ self.next_link = next_link
+ self.value = value
+
+
+class FeatureStoreSettings(_serialization.Model):
+ """FeatureStoreSettings.
+
+ :ivar compute_runtime:
+ :vartype compute_runtime: ~azure.mgmt.machinelearningservices.models.ComputeRuntimeDto
+ :ivar offline_store_connection_name:
+ :vartype offline_store_connection_name: str
+ :ivar online_store_connection_name:
+ :vartype online_store_connection_name: str
+ """
+
+ _attribute_map = {
+ "compute_runtime": {"key": "computeRuntime", "type": "ComputeRuntimeDto"},
+ "offline_store_connection_name": {"key": "offlineStoreConnectionName", "type": "str"},
+ "online_store_connection_name": {"key": "onlineStoreConnectionName", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ compute_runtime: Optional["_models.ComputeRuntimeDto"] = None,
+ offline_store_connection_name: Optional[str] = None,
+ online_store_connection_name: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword compute_runtime:
+ :paramtype compute_runtime: ~azure.mgmt.machinelearningservices.models.ComputeRuntimeDto
+ :keyword offline_store_connection_name:
+ :paramtype offline_store_connection_name: str
+ :keyword online_store_connection_name:
+ :paramtype online_store_connection_name: str
+ """
+ super().__init__(**kwargs)
+ self.compute_runtime = compute_runtime
+ self.offline_store_connection_name = offline_store_connection_name
+ self.online_store_connection_name = online_store_connection_name
+
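+ # Illustrative sketch (editorial addition, not autorest output): wiring up FeatureStoreSettings
+ # for a feature-store-enabled workspace. The connection names below are hypothetical
+ # placeholders; the optional compute_runtime is simply left unset.
+ def _example_feature_store_settings() -> FeatureStoreSettings:
+     return FeatureStoreSettings(
+         offline_store_connection_name="offline-store-connection",  # hypothetical name
+         online_store_connection_name="online-store-connection",  # hypothetical name
+     )
+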
+
+class FeatureSubset(MonitoringFeatureFilterBase):
+ """FeatureSubset.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar filter_type: [Required] Specifies the feature filter to leverage when selecting features
+ to calculate metrics over. Required. Known values are: "AllFeatures", "TopNByAttribution", and
+ "FeatureSubset".
+ :vartype filter_type: str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringFeatureFilterType
+ :ivar features: [Required] The list of features to include. Required.
+ :vartype features: list[str]
+ """
+
+ _validation = {
+ "filter_type": {"required": True},
+ "features": {"required": True},
+ }
+
+ _attribute_map = {
+ "filter_type": {"key": "filterType", "type": "str"},
+ "features": {"key": "features", "type": "[str]"},
+ }
+
+ def __init__(self, *, features: List[str], **kwargs: Any) -> None:
+ """
+ :keyword features: [Required] The list of features to include. Required.
+ :paramtype features: list[str]
+ """
+ super().__init__(**kwargs)
+ self.filter_type: str = "FeatureSubset"
+ self.features = features
+
+
+class FeatureWindow(_serialization.Model):
+ """Specifies the feature window.
+
+ :ivar feature_window_end: Specifies the feature window end time.
+ :vartype feature_window_end: ~datetime.datetime
+ :ivar feature_window_start: Specifies the feature window start time.
+ :vartype feature_window_start: ~datetime.datetime
+ """
+
+ _attribute_map = {
+ "feature_window_end": {"key": "featureWindowEnd", "type": "iso-8601"},
+ "feature_window_start": {"key": "featureWindowStart", "type": "iso-8601"},
+ }
+
+ def __init__(
+ self,
+ *,
+ feature_window_end: Optional[datetime.datetime] = None,
+ feature_window_start: Optional[datetime.datetime] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword feature_window_end: Specifies the feature window end time.
+ :paramtype feature_window_end: ~datetime.datetime
+ :keyword feature_window_start: Specifies the feature window start time.
+ :paramtype feature_window_start: ~datetime.datetime
+ """
+ super().__init__(**kwargs)
+ self.feature_window_end = feature_window_end
+ self.feature_window_start = feature_window_start
+
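+ # Illustrative sketch (editorial addition, not autorest output): a FeatureWindow covering one
+ # month, built from timezone-aware datetimes (the module already imports ``datetime`` for the
+ # annotations above). The specific dates are hypothetical.
+ def _example_feature_window() -> FeatureWindow:
+     start = datetime.datetime(2023, 8, 1, tzinfo=datetime.timezone.utc)
+     end = datetime.datetime(2023, 8, 31, tzinfo=datetime.timezone.utc)
+     return FeatureWindow(feature_window_start=start, feature_window_end=end)
+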
+
+class FeaturizationSettings(_serialization.Model):
+ """Featurization Configuration.
+
+ :ivar dataset_language: Dataset language, useful for the text data.
+ :vartype dataset_language: str
+ """
+
+ _attribute_map = {
+ "dataset_language": {"key": "datasetLanguage", "type": "str"},
+ }
+
+ def __init__(self, *, dataset_language: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword dataset_language: Dataset language, useful for the text data.
+ :paramtype dataset_language: str
+ """
+ super().__init__(**kwargs)
+ self.dataset_language = dataset_language
+
+
+class FileSystemSource(DataImportSource):
+ """FileSystemSource.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar connection: Workspace connection for data import source storage.
+ :vartype connection: str
+ :ivar source_type: [Required] Specifies the type of data. Required. Known values are:
+ "database" and "file_system".
+ :vartype source_type: str or ~azure.mgmt.machinelearningservices.models.DataImportSourceType
+ :ivar path: Path on data import FileSystem source.
+ :vartype path: str
+ """
+
+ _validation = {
+ "source_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "connection": {"key": "connection", "type": "str"},
+ "source_type": {"key": "sourceType", "type": "str"},
+ "path": {"key": "path", "type": "str"},
+ }
+
+ def __init__(self, *, connection: Optional[str] = None, path: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword connection: Workspace connection for data import source storage.
+ :paramtype connection: str
+ :keyword path: Path on data import FileSystem source.
+ :paramtype path: str
+ """
+ super().__init__(connection=connection, **kwargs)
+ self.source_type: str = "file_system"
+ self.path = path
+
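+ # Illustrative sketch (editorial addition, not autorest output): a FileSystemSource for a data
+ # import job. The constructor sets the "file_system" discriminator itself; the connection name
+ # and path are hypothetical placeholders.
+ def _example_file_system_source() -> FileSystemSource:
+     return FileSystemSource(
+         connection="workspace-connection-to-storage",  # hypothetical connection name
+         path="/data/incoming/",  # hypothetical path on the remote file system
+     )
+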
+
+class MonitoringInputDataBase(_serialization.Model):
+ """Monitoring input data base definition.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ FixedInputData, RollingInputData, StaticInputData
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar columns: Mapping of column names to special uses.
+ :vartype columns: dict[str, str]
+ :ivar data_context: The context metadata of the data source.
+ :vartype data_context: str
+ :ivar input_data_type: [Required] Specifies the type of signal to monitor. Required. Known
+ values are: "Static", "Rolling", and "Fixed".
+ :vartype input_data_type: str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringInputDataType
+ :ivar job_input_type: [Required] Specifies the type of job. Required. Known values are:
+ "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
+ "triton_model".
+ :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
+ :ivar uri: [Required] Input Asset URI. Required.
+ :vartype uri: str
+ """
+
+ _validation = {
+ "input_data_type": {"required": True},
+ "job_input_type": {"required": True},
+ "uri": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
+ _attribute_map = {
+ "columns": {"key": "columns", "type": "{str}"},
+ "data_context": {"key": "dataContext", "type": "str"},
+ "input_data_type": {"key": "inputDataType", "type": "str"},
+ "job_input_type": {"key": "jobInputType", "type": "str"},
+ "uri": {"key": "uri", "type": "str"},
+ }
+
+ _subtype_map = {
+ "input_data_type": {"Fixed": "FixedInputData", "Rolling": "RollingInputData", "Static": "StaticInputData"}
+ }
+
+ def __init__(
+ self,
+ *,
+ job_input_type: Union[str, "_models.JobInputType"],
+ uri: str,
+ columns: Optional[Dict[str, str]] = None,
+ data_context: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword columns: Mapping of column names to special uses.
+ :paramtype columns: dict[str, str]
+ :keyword data_context: The context metadata of the data source.
+ :paramtype data_context: str
+ :keyword job_input_type: [Required] Specifies the type of job. Required. Known values are:
+ "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
+ "triton_model".
+ :paramtype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
+ :keyword uri: [Required] Input Asset URI. Required.
+ :paramtype uri: str
+ """
+ super().__init__(**kwargs)
+ self.columns = columns
+ self.data_context = data_context
+ self.input_data_type: Optional[str] = None
+ self.job_input_type = job_input_type
+ self.uri = uri
+
+
+class FixedInputData(MonitoringInputDataBase):
+ """Fixed input data definition.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar columns: Mapping of column names to special uses.
+ :vartype columns: dict[str, str]
+ :ivar data_context: The context metadata of the data source.
+ :vartype data_context: str
+ :ivar input_data_type: [Required] Specifies the type of signal to monitor. Required. Known
+ values are: "Static", "Rolling", and "Fixed".
+ :vartype input_data_type: str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringInputDataType
+ :ivar job_input_type: [Required] Specifies the type of job. Required. Known values are:
+ "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
+ "triton_model".
+ :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
+ :ivar uri: [Required] Input Asset URI. Required.
+ :vartype uri: str
+ """
+
+ _validation = {
+ "input_data_type": {"required": True},
+ "job_input_type": {"required": True},
+ "uri": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
+ _attribute_map = {
+ "columns": {"key": "columns", "type": "{str}"},
+ "data_context": {"key": "dataContext", "type": "str"},
+ "input_data_type": {"key": "inputDataType", "type": "str"},
+ "job_input_type": {"key": "jobInputType", "type": "str"},
+ "uri": {"key": "uri", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ job_input_type: Union[str, "_models.JobInputType"],
+ uri: str,
+ columns: Optional[Dict[str, str]] = None,
+ data_context: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword columns: Mapping of column names to special uses.
+ :paramtype columns: dict[str, str]
+ :keyword data_context: The context metadata of the data source.
+ :paramtype data_context: str
+ :keyword job_input_type: [Required] Specifies the type of job. Required. Known values are:
+ "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
+ "triton_model".
+ :paramtype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
+ :keyword uri: [Required] Input Asset URI. Required.
+ :paramtype uri: str
+ """
+ super().__init__(columns=columns, data_context=data_context, job_input_type=job_input_type, uri=uri, **kwargs)
+ self.input_data_type: str = "Fixed"
+
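+ # Illustrative sketch (editorial addition, not autorest output): FixedInputData sets the "Fixed"
+ # discriminator in its constructor, so callers only supply the job input type and asset URI.
+ # The URI and data context below are hypothetical placeholders.
+ def _example_fixed_input_data() -> FixedInputData:
+     return FixedInputData(
+         job_input_type="mltable",
+         uri="azureml://datastores/workspaceblobstore/paths/monitoring/production/",
+         data_context="model_inputs",  # hypothetical context label
+     )
+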
+
+class FlavorData(_serialization.Model):
+ """FlavorData.
+
+ :ivar data: Model flavor-specific data.
+ :vartype data: dict[str, str]
+ """
+
+ _attribute_map = {
+ "data": {"key": "data", "type": "{str}"},
+ }
+
+ def __init__(self, *, data: Optional[Dict[str, str]] = None, **kwargs: Any) -> None:
+ """
+ :keyword data: Model flavor-specific data.
+ :paramtype data: dict[str, str]
+ """
+ super().__init__(**kwargs)
+ self.data = data
+
+
+class Forecasting(TableVertical, AutoMLVertical): # pylint: disable=too-many-instance-attributes
+ """Forecasting task in AutoML Table vertical.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
+ "Warning", "Error", and "Critical".
+ :vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
+ :ivar target_column_name: Target column name: this is the prediction values column.
+ Also known as the label column name in the context of classification tasks.
+ :vartype target_column_name: str
+ :ivar task_type: [Required] Task type for AutoMLJob. Required. Known values are:
+ "Classification", "Regression", "Forecasting", "ImageClassification",
+ "ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
+ "TextClassification", "TextClassificationMultilabel", and "TextNER".
+ :vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
+ :ivar training_data: [Required] Training data input. Required.
+ :vartype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar cv_split_column_names: Columns to use for CVSplit data.
+ :vartype cv_split_column_names: list[str]
+ :ivar featurization_settings: Featurization inputs needed for AutoML job.
+ :vartype featurization_settings:
+ ~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
+ :ivar fixed_parameters: Model/training parameters that will remain constant throughout
+ training.
+ :vartype fixed_parameters: ~azure.mgmt.machinelearningservices.models.TableFixedParameters
+ :ivar limit_settings: Execution constraints for AutoMLJob.
+ :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
+ :ivar n_cross_validations: Number of cross validation folds to be applied on training dataset
+ when validation dataset is not provided.
+ :vartype n_cross_validations: ~azure.mgmt.machinelearningservices.models.NCrossValidations
+ :ivar search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :vartype search_space: list[~azure.mgmt.machinelearningservices.models.TableParameterSubspace]
+ :ivar sweep_settings: Settings for model sweeping and hyperparameter tuning.
+ :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.TableSweepSettings
+ :ivar test_data: Test data input.
+ :vartype test_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar test_data_size: The fraction of the test dataset that needs to be set aside for
+ validation purposes.
+ Values between (0.0, 1.0).
+ Applied when the validation dataset is not provided.
+ :vartype test_data_size: float
+ :ivar validation_data: Validation data inputs.
+ :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar validation_data_size: The fraction of the training dataset that needs to be set aside
+ for validation purposes.
+ Values between (0.0, 1.0).
+ Applied when the validation dataset is not provided.
+ :vartype validation_data_size: float
+ :ivar weight_column_name: The name of the sample weight column. Automated ML supports a
+ weighted column as an input, causing rows in the data to be weighted up or down.
+ :vartype weight_column_name: str
+ :ivar forecasting_settings: Forecasting task specific inputs.
+ :vartype forecasting_settings: ~azure.mgmt.machinelearningservices.models.ForecastingSettings
+ :ivar primary_metric: Primary metric for forecasting task. Known values are:
+ "SpearmanCorrelation", "NormalizedRootMeanSquaredError", "R2Score", and
+ "NormalizedMeanAbsoluteError".
+ :vartype primary_metric: str or
+ ~azure.mgmt.machinelearningservices.models.ForecastingPrimaryMetrics
+ :ivar training_settings: Inputs for training phase for an AutoML Job.
+ :vartype training_settings:
+ ~azure.mgmt.machinelearningservices.models.ForecastingTrainingSettings
+ """
+
+ _validation = {
+ "task_type": {"required": True},
+ "training_data": {"required": True},
+ }
+
+ _attribute_map = {
+ "log_verbosity": {"key": "logVerbosity", "type": "str"},
+ "target_column_name": {"key": "targetColumnName", "type": "str"},
+ "task_type": {"key": "taskType", "type": "str"},
+ "training_data": {"key": "trainingData", "type": "MLTableJobInput"},
+ "cv_split_column_names": {"key": "cvSplitColumnNames", "type": "[str]"},
+ "featurization_settings": {"key": "featurizationSettings", "type": "TableVerticalFeaturizationSettings"},
+ "fixed_parameters": {"key": "fixedParameters", "type": "TableFixedParameters"},
+ "limit_settings": {"key": "limitSettings", "type": "TableVerticalLimitSettings"},
+ "n_cross_validations": {"key": "nCrossValidations", "type": "NCrossValidations"},
+ "search_space": {"key": "searchSpace", "type": "[TableParameterSubspace]"},
+ "sweep_settings": {"key": "sweepSettings", "type": "TableSweepSettings"},
+ "test_data": {"key": "testData", "type": "MLTableJobInput"},
+ "test_data_size": {"key": "testDataSize", "type": "float"},
+ "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
+ "validation_data_size": {"key": "validationDataSize", "type": "float"},
+ "weight_column_name": {"key": "weightColumnName", "type": "str"},
+ "forecasting_settings": {"key": "forecastingSettings", "type": "ForecastingSettings"},
+ "primary_metric": {"key": "primaryMetric", "type": "str"},
+ "training_settings": {"key": "trainingSettings", "type": "ForecastingTrainingSettings"},
+ }
+
+ def __init__(
+ self,
+ *,
+ training_data: "_models.MLTableJobInput",
+ log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
+ target_column_name: Optional[str] = None,
+ cv_split_column_names: Optional[List[str]] = None,
+ featurization_settings: Optional["_models.TableVerticalFeaturizationSettings"] = None,
+ fixed_parameters: Optional["_models.TableFixedParameters"] = None,
+ limit_settings: Optional["_models.TableVerticalLimitSettings"] = None,
+ n_cross_validations: Optional["_models.NCrossValidations"] = None,
+ search_space: Optional[List["_models.TableParameterSubspace"]] = None,
+ sweep_settings: Optional["_models.TableSweepSettings"] = None,
+ test_data: Optional["_models.MLTableJobInput"] = None,
+ test_data_size: Optional[float] = None,
+ validation_data: Optional["_models.MLTableJobInput"] = None,
+ validation_data_size: Optional[float] = None,
+ weight_column_name: Optional[str] = None,
+ forecasting_settings: Optional["_models.ForecastingSettings"] = None,
+ primary_metric: Optional[Union[str, "_models.ForecastingPrimaryMetrics"]] = None,
+ training_settings: Optional["_models.ForecastingTrainingSettings"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
+ "Warning", "Error", and "Critical".
+ :paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
+ :keyword target_column_name: Target column name: this is the prediction values column.
+ Also known as the label column name in the context of classification tasks.
+ :paramtype target_column_name: str
+ :keyword training_data: [Required] Training data input. Required.
+ :paramtype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword cv_split_column_names: Columns to use for CVSplit data.
+ :paramtype cv_split_column_names: list[str]
+ :keyword featurization_settings: Featurization inputs needed for AutoML job.
+ :paramtype featurization_settings:
+ ~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
+ :keyword fixed_parameters: Model/training parameters that will remain constant throughout
+ training.
+ :paramtype fixed_parameters: ~azure.mgmt.machinelearningservices.models.TableFixedParameters
+ :keyword limit_settings: Execution constraints for AutoMLJob.
+ :paramtype limit_settings:
+ ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
+ :keyword n_cross_validations: Number of cross validation folds to be applied on training
+ dataset
+ when validation dataset is not provided.
+ :paramtype n_cross_validations: ~azure.mgmt.machinelearningservices.models.NCrossValidations
+ :keyword search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :paramtype search_space:
+ list[~azure.mgmt.machinelearningservices.models.TableParameterSubspace]
+ :keyword sweep_settings: Settings for model sweeping and hyperparameter tuning.
+ :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.TableSweepSettings
+ :keyword test_data: Test data input.
+ :paramtype test_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword test_data_size: The fraction of the test dataset that needs to be set aside for
+ validation purposes.
+ Values between (0.0, 1.0).
+ Applied when the validation dataset is not provided.
+ :paramtype test_data_size: float
+ :keyword validation_data: Validation data inputs.
+ :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword validation_data_size: The fraction of the training dataset that needs to be set aside
+ for validation purposes.
+ Values between (0.0, 1.0).
+ Applied when the validation dataset is not provided.
+ :paramtype validation_data_size: float
+ :keyword weight_column_name: The name of the sample weight column. Automated ML supports a
+ weighted column as an input, causing rows in the data to be weighted up or down.
+ :paramtype weight_column_name: str
+ :keyword forecasting_settings: Forecasting task specific inputs.
+ :paramtype forecasting_settings: ~azure.mgmt.machinelearningservices.models.ForecastingSettings
+ :keyword primary_metric: Primary metric for forecasting task. Known values are:
+ "SpearmanCorrelation", "NormalizedRootMeanSquaredError", "R2Score", and
+ "NormalizedMeanAbsoluteError".
+ :paramtype primary_metric: str or
+ ~azure.mgmt.machinelearningservices.models.ForecastingPrimaryMetrics
+ :keyword training_settings: Inputs for training phase for an AutoML Job.
+ :paramtype training_settings:
+ ~azure.mgmt.machinelearningservices.models.ForecastingTrainingSettings
+ """
+ super().__init__(
+ cv_split_column_names=cv_split_column_names,
+ featurization_settings=featurization_settings,
+ fixed_parameters=fixed_parameters,
+ limit_settings=limit_settings,
+ n_cross_validations=n_cross_validations,
+ search_space=search_space,
+ sweep_settings=sweep_settings,
+ test_data=test_data,
+ test_data_size=test_data_size,
+ validation_data=validation_data,
+ validation_data_size=validation_data_size,
+ weight_column_name=weight_column_name,
+ log_verbosity=log_verbosity,
+ target_column_name=target_column_name,
+ training_data=training_data,
+ **kwargs
+ )
+ self.log_verbosity = log_verbosity
+ self.target_column_name = target_column_name
+ self.task_type: str = "Forecasting"
+ self.training_data = training_data
+ self.forecasting_settings = forecasting_settings
+ self.primary_metric = primary_metric
+ self.training_settings = training_settings
+ self.cv_split_column_names = cv_split_column_names
+ self.featurization_settings = featurization_settings
+ self.fixed_parameters = fixed_parameters
+ self.limit_settings = limit_settings
+ self.n_cross_validations = n_cross_validations
+ self.search_space = search_space
+ self.sweep_settings = sweep_settings
+ self.test_data = test_data
+ self.test_data_size = test_data_size
+ self.validation_data = validation_data
+ self.validation_data_size = validation_data_size
+ self.weight_column_name = weight_column_name
+
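+ # Illustrative sketch (editorial addition, not autorest output): a minimal Forecasting task. It
+ # assumes MLTableJobInput (defined elsewhere in this module) accepts a ``uri`` keyword, as the
+ # docstrings above suggest; the URI and column name are hypothetical. A ForecastingSettings
+ # instance (see the next class) can be attached via ``forecasting_settings`` in the same way.
+ def _example_forecasting_task() -> Forecasting:
+     return Forecasting(
+         training_data=MLTableJobInput(uri="azureml://datastores/training/paths/sales/"),
+         target_column_name="quantity_sold",  # hypothetical label column
+         primary_metric="NormalizedRootMeanSquaredError",
+     )
+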
+
+class ForecastingSettings(_serialization.Model): # pylint: disable=too-many-instance-attributes
+ """Forecasting specific parameters.
+
+ :ivar country_or_region_for_holidays: Country or region for holidays for forecasting tasks.
+ These should be ISO 3166 two-letter country/region codes, for example 'US' or 'GB'.
+ :vartype country_or_region_for_holidays: str
+ :ivar cv_step_size: Number of periods between the origin time of one CV fold and the next fold.
+ For
+ example, if ``CVStepSize`` = 3 for daily data, the origin time for each fold will be
+ three days apart.
+ :vartype cv_step_size: int
+ :ivar feature_lags: Flag for generating lags for the numeric features with 'auto' or null.
+ Known values are: "None" and "Auto".
+ :vartype feature_lags: str or ~azure.mgmt.machinelearningservices.models.FeatureLags
+ :ivar features_unknown_at_forecast_time: The feature columns that are available for training
+ but unknown at the time of forecast/inference.
+ If features_unknown_at_forecast_time is not set, it is assumed that all the feature columns in
+ the dataset are known at inference time.
+ :vartype features_unknown_at_forecast_time: list[str]
+ :ivar forecast_horizon: The desired maximum forecast horizon in units of time-series frequency.
+ :vartype forecast_horizon: ~azure.mgmt.machinelearningservices.models.ForecastHorizon
+ :ivar frequency: When forecasting, this parameter represents the period with which the forecast
+ is desired, for example daily, weekly, yearly, etc. The forecast frequency is dataset frequency
+ by default.
+ :vartype frequency: str
+ :ivar seasonality: Set time series seasonality as an integer multiple of the series frequency.
+ If seasonality is set to 'auto', it will be inferred.
+ :vartype seasonality: ~azure.mgmt.machinelearningservices.models.Seasonality
+ :ivar short_series_handling_config: The parameter defining how AutoML should handle short
+ time series. Known values are: "None", "Auto", "Pad", and "Drop".
+ :vartype short_series_handling_config: str or
+ ~azure.mgmt.machinelearningservices.models.ShortSeriesHandlingConfiguration
+ :ivar target_aggregate_function: The function to be used to aggregate the time series target
+ column to conform to a user specified frequency.
+ If the TargetAggregateFunction is set (i.e. not 'None') but the freq parameter is not set, an
+ error is raised. The possible target aggregation functions are: "sum", "max", "min", and "mean".
+ Known values are: "None", "Sum", "Max", "Min", and "Mean".
+ :vartype target_aggregate_function: str or
+ ~azure.mgmt.machinelearningservices.models.TargetAggregationFunction
+ :ivar target_lags: The number of past periods to lag from the target column.
+ :vartype target_lags: ~azure.mgmt.machinelearningservices.models.TargetLags
+ :ivar target_rolling_window_size: The number of past periods used to create a rolling window
+ average of the target column.
+ :vartype target_rolling_window_size:
+ ~azure.mgmt.machinelearningservices.models.TargetRollingWindowSize
+ :ivar time_column_name: The name of the time column. This parameter is required when
+ forecasting to specify the datetime column in the input data used for building the time series
+ and inferring its frequency.
+ :vartype time_column_name: str
+ :ivar time_series_id_column_names: The names of columns used to group a timeseries. It can be
+ used to create multiple series.
+ If grain is not defined, the data set is assumed to be one time-series. This parameter is used
+ with task type forecasting.
+ :vartype time_series_id_column_names: list[str]
+ :ivar use_stl: Configure STL Decomposition of the time-series target column. Known values are:
+ "None", "Season", and "SeasonTrend".
+ :vartype use_stl: str or ~azure.mgmt.machinelearningservices.models.UseStl
+ """
+
+ _attribute_map = {
+ "country_or_region_for_holidays": {"key": "countryOrRegionForHolidays", "type": "str"},
+ "cv_step_size": {"key": "cvStepSize", "type": "int"},
+ "feature_lags": {"key": "featureLags", "type": "str"},
+ "features_unknown_at_forecast_time": {"key": "featuresUnknownAtForecastTime", "type": "[str]"},
+ "forecast_horizon": {"key": "forecastHorizon", "type": "ForecastHorizon"},
+ "frequency": {"key": "frequency", "type": "str"},
+ "seasonality": {"key": "seasonality", "type": "Seasonality"},
+ "short_series_handling_config": {"key": "shortSeriesHandlingConfig", "type": "str"},
+ "target_aggregate_function": {"key": "targetAggregateFunction", "type": "str"},
+ "target_lags": {"key": "targetLags", "type": "TargetLags"},
+ "target_rolling_window_size": {"key": "targetRollingWindowSize", "type": "TargetRollingWindowSize"},
+ "time_column_name": {"key": "timeColumnName", "type": "str"},
+ "time_series_id_column_names": {"key": "timeSeriesIdColumnNames", "type": "[str]"},
+ "use_stl": {"key": "useStl", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ country_or_region_for_holidays: Optional[str] = None,
+ cv_step_size: Optional[int] = None,
+ feature_lags: Optional[Union[str, "_models.FeatureLags"]] = None,
+ features_unknown_at_forecast_time: Optional[List[str]] = None,
+ forecast_horizon: Optional["_models.ForecastHorizon"] = None,
+ frequency: Optional[str] = None,
+ seasonality: Optional["_models.Seasonality"] = None,
+ short_series_handling_config: Optional[Union[str, "_models.ShortSeriesHandlingConfiguration"]] = None,
+ target_aggregate_function: Optional[Union[str, "_models.TargetAggregationFunction"]] = None,
+ target_lags: Optional["_models.TargetLags"] = None,
+ target_rolling_window_size: Optional["_models.TargetRollingWindowSize"] = None,
+ time_column_name: Optional[str] = None,
+ time_series_id_column_names: Optional[List[str]] = None,
+ use_stl: Optional[Union[str, "_models.UseStl"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword country_or_region_for_holidays: Country or region for holidays for forecasting tasks.
+ These should be ISO 3166 two-letter country/region codes, for example 'US' or 'GB'.
+ :paramtype country_or_region_for_holidays: str
+ :keyword cv_step_size: Number of periods between the origin time of one CV fold and the next
+ fold. For
+ example, if ``CVStepSize`` = 3 for daily data, the origin time for each fold will be
+ three days apart.
+ :paramtype cv_step_size: int
+ :keyword feature_lags: Flag for generating lags for the numeric features with 'auto' or null.
+ Known values are: "None" and "Auto".
+ :paramtype feature_lags: str or ~azure.mgmt.machinelearningservices.models.FeatureLags
+ :keyword features_unknown_at_forecast_time: The feature columns that are available for training
+ but unknown at the time of forecast/inference.
+ If features_unknown_at_forecast_time is not set, it is assumed that all the feature columns in
+ the dataset are known at inference time.
+ :paramtype features_unknown_at_forecast_time: list[str]
+ :keyword forecast_horizon: The desired maximum forecast horizon in units of time-series
+ frequency.
+ :paramtype forecast_horizon: ~azure.mgmt.machinelearningservices.models.ForecastHorizon
+ :keyword frequency: When forecasting, this parameter represents the period with which the
+ forecast is desired, for example daily, weekly, yearly, etc. The forecast frequency is dataset
+ frequency by default.
+ :paramtype frequency: str
+ :keyword seasonality: Set time series seasonality as an integer multiple of the series
+ frequency.
+ If seasonality is set to 'auto', it will be inferred.
+ :paramtype seasonality: ~azure.mgmt.machinelearningservices.models.Seasonality
+ :keyword short_series_handling_config: The parameter defining how AutoML should handle short
+ time series. Known values are: "None", "Auto", "Pad", and "Drop".
+ :paramtype short_series_handling_config: str or
+ ~azure.mgmt.machinelearningservices.models.ShortSeriesHandlingConfiguration
+ :keyword target_aggregate_function: The function to be used to aggregate the time series target
+ column to conform to a user specified frequency.
+ If the TargetAggregateFunction is set (i.e. not 'None') but the freq parameter is not set, an
+ error is raised. The possible target aggregation functions are: "sum", "max", "min", and "mean".
+ Known values are: "None", "Sum", "Max", "Min", and "Mean".
+ :paramtype target_aggregate_function: str or
+ ~azure.mgmt.machinelearningservices.models.TargetAggregationFunction
+ :keyword target_lags: The number of past periods to lag from the target column.
+ :paramtype target_lags: ~azure.mgmt.machinelearningservices.models.TargetLags
+ :keyword target_rolling_window_size: The number of past periods used to create a rolling window
+ average of the target column.
+ :paramtype target_rolling_window_size:
+ ~azure.mgmt.machinelearningservices.models.TargetRollingWindowSize
+ :keyword time_column_name: The name of the time column. This parameter is required when
+ forecasting to specify the datetime column in the input data used for building the time series
+ and inferring its frequency.
+ :paramtype time_column_name: str
+ :keyword time_series_id_column_names: The names of columns used to group a timeseries. It can
+ be used to create multiple series.
+ If grain is not defined, the data set is assumed to be one time-series. This parameter is used
+ with task type forecasting.
+ :paramtype time_series_id_column_names: list[str]
+ :keyword use_stl: Configure STL Decomposition of the time-series target column. Known values
+ are: "None", "Season", and "SeasonTrend".
+ :paramtype use_stl: str or ~azure.mgmt.machinelearningservices.models.UseStl
+ """
+ super().__init__(**kwargs)
+ self.country_or_region_for_holidays = country_or_region_for_holidays
+ self.cv_step_size = cv_step_size
+ self.feature_lags = feature_lags
+ self.features_unknown_at_forecast_time = features_unknown_at_forecast_time
+ self.forecast_horizon = forecast_horizon
+ self.frequency = frequency
+ self.seasonality = seasonality
+ self.short_series_handling_config = short_series_handling_config
+ self.target_aggregate_function = target_aggregate_function
+ self.target_lags = target_lags
+ self.target_rolling_window_size = target_rolling_window_size
+ self.time_column_name = time_column_name
+ self.time_series_id_column_names = time_series_id_column_names
+ self.use_stl = use_stl
+
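+ # Illustrative sketch (editorial addition, not autorest output): ForecastingSettings for a
+ # daily series with US holiday features. Column names and values are hypothetical.
+ def _example_forecasting_settings() -> ForecastingSettings:
+     return ForecastingSettings(
+         time_column_name="date",  # hypothetical datetime column
+         frequency="D",
+         country_or_region_for_holidays="US",
+         cv_step_size=3,  # origin times of consecutive CV folds are three days apart
+         time_series_id_column_names=["store_id", "sku"],  # hypothetical grain columns
+     )
+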
+
+class ForecastingTrainingSettings(TrainingSettings):
+ """Forecasting Training related configuration.
+
+ :ivar enable_dnn_training: Enable recommendation of DNN models.
+ :vartype enable_dnn_training: bool
+ :ivar enable_model_explainability: Flag to turn on explainability on best model.
+ :vartype enable_model_explainability: bool
+ :ivar enable_onnx_compatible_models: Flag for enabling onnx compatible models.
+ :vartype enable_onnx_compatible_models: bool
+ :ivar enable_stack_ensemble: Enable stack ensemble run.
+ :vartype enable_stack_ensemble: bool
+ :ivar enable_vote_ensemble: Enable voting ensemble run.
+ :vartype enable_vote_ensemble: bool
+ :ivar ensemble_model_download_timeout: During VotingEnsemble and StackEnsemble model
+ generation, multiple fitted models from the previous child runs are downloaded.
+ Configure this parameter with a higher value than 300 secs, if more time is needed.
+ :vartype ensemble_model_download_timeout: ~datetime.timedelta
+ :ivar stack_ensemble_settings: Stack ensemble settings for stack ensemble run.
+ :vartype stack_ensemble_settings:
+ ~azure.mgmt.machinelearningservices.models.StackEnsembleSettings
+ :ivar training_mode: Training mode. Setting it to 'auto' is the same as 'non-distributed' for
+ now; in the future it may result in mixed-mode or heuristics-based mode selection. Default is
+ 'auto'.
+ If 'Distributed', only distributed featurization is used and distributed algorithms are
+ chosen.
+ If 'NonDistributed', only non-distributed algorithms are chosen. Known values are: "Auto",
+ "Distributed", and "NonDistributed".
+ :vartype training_mode: str or ~azure.mgmt.machinelearningservices.models.TrainingMode
+ :ivar allowed_training_algorithms: Allowed models for forecasting task.
+ :vartype allowed_training_algorithms: list[str or
+ ~azure.mgmt.machinelearningservices.models.ForecastingModels]
+ :ivar blocked_training_algorithms: Blocked models for forecasting task.
+ :vartype blocked_training_algorithms: list[str or
+ ~azure.mgmt.machinelearningservices.models.ForecastingModels]
+ """
+
+ _attribute_map = {
+ "enable_dnn_training": {"key": "enableDnnTraining", "type": "bool"},
+ "enable_model_explainability": {"key": "enableModelExplainability", "type": "bool"},
+ "enable_onnx_compatible_models": {"key": "enableOnnxCompatibleModels", "type": "bool"},
+ "enable_stack_ensemble": {"key": "enableStackEnsemble", "type": "bool"},
+ "enable_vote_ensemble": {"key": "enableVoteEnsemble", "type": "bool"},
+ "ensemble_model_download_timeout": {"key": "ensembleModelDownloadTimeout", "type": "duration"},
+ "stack_ensemble_settings": {"key": "stackEnsembleSettings", "type": "StackEnsembleSettings"},
+ "training_mode": {"key": "trainingMode", "type": "str"},
+ "allowed_training_algorithms": {"key": "allowedTrainingAlgorithms", "type": "[str]"},
+ "blocked_training_algorithms": {"key": "blockedTrainingAlgorithms", "type": "[str]"},
+ }
+
+ def __init__(
+ self,
+ *,
+ enable_dnn_training: bool = False,
+ enable_model_explainability: bool = True,
+ enable_onnx_compatible_models: bool = False,
+ enable_stack_ensemble: bool = True,
+ enable_vote_ensemble: bool = True,
+ ensemble_model_download_timeout: datetime.timedelta = "PT5M",
+ stack_ensemble_settings: Optional["_models.StackEnsembleSettings"] = None,
+ training_mode: Optional[Union[str, "_models.TrainingMode"]] = None,
+ allowed_training_algorithms: Optional[List[Union[str, "_models.ForecastingModels"]]] = None,
+ blocked_training_algorithms: Optional[List[Union[str, "_models.ForecastingModels"]]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword enable_dnn_training: Enable recommendation of DNN models.
+ :paramtype enable_dnn_training: bool
+ :keyword enable_model_explainability: Flag to turn on explainability on best model.
+ :paramtype enable_model_explainability: bool
+ :keyword enable_onnx_compatible_models: Flag for enabling onnx compatible models.
+ :paramtype enable_onnx_compatible_models: bool
+ :keyword enable_stack_ensemble: Enable stack ensemble run.
+ :paramtype enable_stack_ensemble: bool
+ :keyword enable_vote_ensemble: Enable voting ensemble run.
+ :paramtype enable_vote_ensemble: bool
+ :keyword ensemble_model_download_timeout: During VotingEnsemble and StackEnsemble model
+ generation, multiple fitted models from the previous child runs are downloaded.
+ Configure this parameter with a higher value than 300 secs, if more time is needed.
+ :paramtype ensemble_model_download_timeout: ~datetime.timedelta
+ :keyword stack_ensemble_settings: Stack ensemble settings for stack ensemble run.
+ :paramtype stack_ensemble_settings:
+ ~azure.mgmt.machinelearningservices.models.StackEnsembleSettings
+ :keyword training_mode: Training mode. Setting it to 'auto' is the same as 'non-distributed'
+ for now; in the future it may result in mixed-mode or heuristics-based mode selection. Default
+ is 'auto'.
+ If 'Distributed', only distributed featurization is used and distributed algorithms are
+ chosen.
+ If 'NonDistributed', only non-distributed algorithms are chosen. Known values are: "Auto",
+ "Distributed", and "NonDistributed".
+ :paramtype training_mode: str or ~azure.mgmt.machinelearningservices.models.TrainingMode
+ :keyword allowed_training_algorithms: Allowed models for forecasting task.
+ :paramtype allowed_training_algorithms: list[str or
+ ~azure.mgmt.machinelearningservices.models.ForecastingModels]
+ :keyword blocked_training_algorithms: Blocked models for forecasting task.
+ :paramtype blocked_training_algorithms: list[str or
+ ~azure.mgmt.machinelearningservices.models.ForecastingModels]
+ """
+ super().__init__(
+ enable_dnn_training=enable_dnn_training,
+ enable_model_explainability=enable_model_explainability,
+ enable_onnx_compatible_models=enable_onnx_compatible_models,
+ enable_stack_ensemble=enable_stack_ensemble,
+ enable_vote_ensemble=enable_vote_ensemble,
+ ensemble_model_download_timeout=ensemble_model_download_timeout,
+ stack_ensemble_settings=stack_ensemble_settings,
+ training_mode=training_mode,
+ **kwargs
+ )
+ self.allowed_training_algorithms = allowed_training_algorithms
+ self.blocked_training_algorithms = blocked_training_algorithms
+
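+ # Illustrative sketch (editorial addition, not autorest output): forecasting training settings
+ # that keep the ensemble defaults but raise the ensemble model download timeout, passing a
+ # timedelta instead of relying on the ISO-8601 string default.
+ def _example_forecasting_training_settings() -> ForecastingTrainingSettings:
+     return ForecastingTrainingSettings(
+         enable_model_explainability=True,
+         ensemble_model_download_timeout=datetime.timedelta(minutes=10),
+     )
+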
+
+class FQDNEndpoint(_serialization.Model):
+ """FQDNEndpoint.
+
+ :ivar domain_name:
+ :vartype domain_name: str
+ :ivar endpoint_details:
+ :vartype endpoint_details: list[~azure.mgmt.machinelearningservices.models.FQDNEndpointDetail]
+ """
+
+ _attribute_map = {
+ "domain_name": {"key": "domainName", "type": "str"},
+ "endpoint_details": {"key": "endpointDetails", "type": "[FQDNEndpointDetail]"},
+ }
+
+ def __init__(
+ self,
+ *,
+ domain_name: Optional[str] = None,
+ endpoint_details: Optional[List["_models.FQDNEndpointDetail"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword domain_name:
+ :paramtype domain_name: str
+ :keyword endpoint_details:
+ :paramtype endpoint_details:
+ list[~azure.mgmt.machinelearningservices.models.FQDNEndpointDetail]
+ """
+ super().__init__(**kwargs)
+ self.domain_name = domain_name
+ self.endpoint_details = endpoint_details
+
+
+class FQDNEndpointDetail(_serialization.Model):
+ """FQDNEndpointDetail.
+
+ :ivar port:
+ :vartype port: int
+ """
+
+ _attribute_map = {
+ "port": {"key": "port", "type": "int"},
+ }
+
+ def __init__(self, *, port: Optional[int] = None, **kwargs: Any) -> None:
+ """
+ :keyword port:
+ :paramtype port: int
+ """
+ super().__init__(**kwargs)
+ self.port = port
+
+
+class FQDNEndpoints(_serialization.Model):
+ """FQDNEndpoints.
+
+ :ivar category:
+ :vartype category: str
+ :ivar endpoints:
+ :vartype endpoints: list[~azure.mgmt.machinelearningservices.models.FQDNEndpoint]
+ """
+
+ _attribute_map = {
+ "category": {"key": "category", "type": "str"},
+ "endpoints": {"key": "endpoints", "type": "[FQDNEndpoint]"},
+ }
+
+ def __init__(
+ self, *, category: Optional[str] = None, endpoints: Optional[List["_models.FQDNEndpoint"]] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword category:
+ :paramtype category: str
+ :keyword endpoints:
+ :paramtype endpoints: list[~azure.mgmt.machinelearningservices.models.FQDNEndpoint]
+ """
+ super().__init__(**kwargs)
+ self.category = category
+ self.endpoints = endpoints
+
+
+class FQDNEndpointsPropertyBag(_serialization.Model):
+ """Property bag for FQDN endpoints result.
+
+ :ivar properties:
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.FQDNEndpoints
+ """
+
+ _attribute_map = {
+ "properties": {"key": "properties", "type": "FQDNEndpoints"},
+ }
+
+ def __init__(self, *, properties: Optional["_models.FQDNEndpoints"] = None, **kwargs: Any) -> None:
+ """
+ :keyword properties:
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.FQDNEndpoints
+ """
+ super().__init__(**kwargs)
+ self.properties = properties
+
+
+class OutboundRule(_serialization.Model):
+ """Outbound rule for the managed network of a machine learning workspace.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ FqdnOutboundRule, PrivateEndpointOutboundRule, ServiceTagOutboundRule
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar category: Category of a managed network outbound rule of a machine learning workspace.
+ Known values are: "Required", "Recommended", and "UserDefined".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.RuleCategory
+ :ivar status: Status of a managed network outbound rule of a machine learning workspace. Known
+ values are: "Inactive" and "Active".
+ :vartype status: str or ~azure.mgmt.machinelearningservices.models.RuleStatus
+ :ivar type: Type of a managed network outbound rule of a machine learning workspace. Required.
+ Known values are: "FQDN", "PrivateEndpoint", and "ServiceTag".
+ :vartype type: str or ~azure.mgmt.machinelearningservices.models.RuleType
+ """
+
+ _validation = {
+ "type": {"required": True},
+ }
+
+ _attribute_map = {
+ "category": {"key": "category", "type": "str"},
+ "status": {"key": "status", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ }
+
+ _subtype_map = {
+ "type": {
+ "FQDN": "FqdnOutboundRule",
+ "PrivateEndpoint": "PrivateEndpointOutboundRule",
+ "ServiceTag": "ServiceTagOutboundRule",
+ }
+ }
+
+ def __init__(
+ self,
+ *,
+ category: Optional[Union[str, "_models.RuleCategory"]] = None,
+ status: Optional[Union[str, "_models.RuleStatus"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword category: Category of a managed network outbound rule of a machine learning workspace.
+ Known values are: "Required", "Recommended", and "UserDefined".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.RuleCategory
+ :keyword status: Status of a managed network outbound rule of a machine learning workspace. Known
+ values are: "Inactive" and "Active".
+ :paramtype status: str or ~azure.mgmt.machinelearningservices.models.RuleStatus
+ """
+ super().__init__(**kwargs)
+ self.category = category
+ self.status = status
+ self.type: Optional[str] = None
+
+
+class FqdnOutboundRule(OutboundRule):
+ """FQDN Outbound Rule for the managed network of a machine learning workspace.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar category: Category of a managed network outbound rule of a machine learning workspace.
+ Known values are: "Required", "Recommended", and "UserDefined".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.RuleCategory
+ :ivar status: Status of a managed network outbound rule of a machine learning workspace. Known
+ values are: "Inactive" and "Active".
+ :vartype status: str or ~azure.mgmt.machinelearningservices.models.RuleStatus
+ :ivar type: Type of a managed network outbound rule of a machine learning workspace. Required.
+ Known values are: "FQDN", "PrivateEndpoint", and "ServiceTag".
+ :vartype type: str or ~azure.mgmt.machinelearningservices.models.RuleType
+ :ivar destination:
+ :vartype destination: str
+ """
+
+ _validation = {
+ "type": {"required": True},
+ }
+
+ _attribute_map = {
+ "category": {"key": "category", "type": "str"},
+ "status": {"key": "status", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "destination": {"key": "destination", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ category: Optional[Union[str, "_models.RuleCategory"]] = None,
+ status: Optional[Union[str, "_models.RuleStatus"]] = None,
+ destination: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword category: Category of a managed network outbound rule of a machine learning workspace.
+ Known values are: "Required", "Recommended", and "UserDefined".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.RuleCategory
+ :keyword status: Status of a managed network outbound rule of a machine learning workspace. Known
+ values are: "Inactive" and "Active".
+ :paramtype status: str or ~azure.mgmt.machinelearningservices.models.RuleStatus
+ :keyword destination:
+ :paramtype destination: str
+ """
+ super().__init__(category=category, status=status, **kwargs)
+ self.type: str = "FQDN"
+ self.destination = destination
+
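+ # Illustrative sketch (editorial addition, not autorest output): a user-defined FQDN outbound
+ # rule for the managed network. The constructor sets the "FQDN" discriminator; the destination
+ # host is a hypothetical example.
+ def _example_fqdn_outbound_rule() -> FqdnOutboundRule:
+     return FqdnOutboundRule(
+         category="UserDefined",
+         destination="pypi.org",  # hypothetical FQDN to allow
+     )
+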
+
+class GenerationSafetyQualityMetricThreshold(_serialization.Model):
+ """Generation safety quality metric threshold definition.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar metric: [Required] Gets or sets the generation safety quality metric to calculate. Required.
+ Known values are: "AcceptableGroundednessScorePerInstance", "AggregatedGroundednessPassRate",
+ "AcceptableCoherenceScorePerInstance", "AggregatedCoherencePassRate",
+ "AcceptableFluencyScorePerInstance", "AggregatedFluencyPassRate",
+ "AcceptableSimilarityScorePerInstance", "AggregatedSimilarityPassRate",
+ "AcceptableRelevanceScorePerInstance", and "AggregatedRelevancePassRate".
+ :vartype metric: str or
+ ~azure.mgmt.machinelearningservices.models.GenerationSafetyQualityMetric
+ :ivar threshold: Gets or sets the threshold value.
+ If null, a default value will be set depending on the selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ """
+
+ _validation = {
+ "metric": {"required": True},
+ }
+
+ _attribute_map = {
+ "metric": {"key": "metric", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
+ }
+
+ def __init__(
+ self,
+ *,
+ metric: Union[str, "_models.GenerationSafetyQualityMetric"],
+ threshold: Optional["_models.MonitoringThreshold"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword metric: [Required] Gets or sets the generation safety quality metric to calculate.
+ Required.
+ Known values are: "AcceptableGroundednessScorePerInstance", "AggregatedGroundednessPassRate",
+ "AcceptableCoherenceScorePerInstance", "AggregatedCoherencePassRate",
+ "AcceptableFluencyScorePerInstance", "AggregatedFluencyPassRate",
+ "AcceptableSimilarityScorePerInstance", "AggregatedSimilarityPassRate",
+ "AcceptableRelevanceScorePerInstance", and "AggregatedRelevancePassRate".
+ :paramtype metric: str or
+ ~azure.mgmt.machinelearningservices.models.GenerationSafetyQualityMetric
+ :keyword threshold: Gets or sets the threshold value.
+ If null, a default value will be set depending on the selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ """
+ super().__init__(**kwargs)
+ self.metric = metric
+ self.threshold = threshold
+
+
+class GenerationSafetyQualityMonitoringSignal(MonitoringSignalBase):
+ """Generation safety quality monitoring signal definition.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar notification_types: The current notification mode for this signal.
+ :vartype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
+ :vartype properties: dict[str, str]
+ :ivar signal_type: [Required] Specifies the type of signal to monitor. Required. Known values
+ are: "DataDrift", "PredictionDrift", "DataQuality", "FeatureAttributionDrift", "Custom",
+ "ModelPerformance", "GenerationSafetyQuality", and "GenerationTokenStatistics".
+ :vartype signal_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringSignalType
+ :ivar metric_thresholds: [Required] Gets or sets the metrics to calculate and the corresponding
+ thresholds. Required.
+ :vartype metric_thresholds:
+ list[~azure.mgmt.machinelearningservices.models.GenerationSafetyQualityMetricThreshold]
+ :ivar production_data: Gets or sets the production data for computing metrics.
+ :vartype production_data:
+ list[~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase]
+ :ivar sampling_rate: [Required] The sample rate of the production data, should be greater than
+ 0 and at most 1. Required.
+ :vartype sampling_rate: float
+ :ivar workspace_connection_id: Gets or sets the workspace connection ID used to connect to the
+ content generation endpoint.
+ :vartype workspace_connection_id: str
+ """
+
+ _validation = {
+ "signal_type": {"required": True},
+ "metric_thresholds": {"required": True},
+ "sampling_rate": {"required": True},
+ }
+
+ _attribute_map = {
+ "notification_types": {"key": "notificationTypes", "type": "[str]"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "signal_type": {"key": "signalType", "type": "str"},
+ "metric_thresholds": {"key": "metricThresholds", "type": "[GenerationSafetyQualityMetricThreshold]"},
+ "production_data": {"key": "productionData", "type": "[MonitoringInputDataBase]"},
+ "sampling_rate": {"key": "samplingRate", "type": "float"},
+ "workspace_connection_id": {"key": "workspaceConnectionId", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ metric_thresholds: List["_models.GenerationSafetyQualityMetricThreshold"],
+ sampling_rate: float,
+ notification_types: Optional[List[Union[str, "_models.MonitoringNotificationType"]]] = None,
+ properties: Optional[Dict[str, str]] = None,
+ production_data: Optional[List["_models.MonitoringInputDataBase"]] = None,
+ workspace_connection_id: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword notification_types: The current notification mode for this signal.
+ :paramtype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
+ :paramtype properties: dict[str, str]
+ :keyword metric_thresholds: [Required] Gets or sets the metrics to calculate and the
+ corresponding thresholds. Required.
+ :paramtype metric_thresholds:
+ list[~azure.mgmt.machinelearningservices.models.GenerationSafetyQualityMetricThreshold]
+ :keyword production_data: Gets or sets the production data for computing metrics.
+ :paramtype production_data:
+ list[~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase]
+ :keyword sampling_rate: [Required] The sample rate of the production data, should be greater
+ than 0 and at most 1. Required.
+ :paramtype sampling_rate: float
+ :keyword workspace_connection_id: Gets or sets the workspace connection ID used to connect to
+ the content generation endpoint.
+ :paramtype workspace_connection_id: str
+ """
+ super().__init__(notification_types=notification_types, properties=properties, **kwargs)
+ self.signal_type: str = "GenerationSafetyQuality"
+ self.metric_thresholds = metric_thresholds
+ self.production_data = production_data
+ self.sampling_rate = sampling_rate
+ self.workspace_connection_id = workspace_connection_id
+
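+ # Illustrative sketch (editorial addition, not autorest output): a generation safety quality
+ # signal with a single metric threshold. Leaving ``threshold`` unset lets the service apply its
+ # default for the selected metric; the sampling rate and connection name are hypothetical.
+ def _example_generation_safety_quality_signal() -> GenerationSafetyQualityMonitoringSignal:
+     return GenerationSafetyQualityMonitoringSignal(
+         metric_thresholds=[
+             GenerationSafetyQualityMetricThreshold(metric="AggregatedGroundednessPassRate"),
+         ],
+         sampling_rate=0.1,
+         workspace_connection_id="aoai-connection",  # hypothetical workspace connection
+     )
+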
+
+class GenerationTokenUsageMetricThreshold(_serialization.Model):
+ """Generation token statistics metric threshold definition.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar metric: [Required] Gets or sets the generation token usage metric to calculate. Required.
+ Known values are: "TotalTokenCount" and "TotalTokenCountPerGroup".
+ :vartype metric: str or ~azure.mgmt.machinelearningservices.models.GenerationTokenUsageMetric
+ :ivar threshold: Gets or sets the threshold value.
+ If null, a default value will be set depending on the selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ """
+
+ _validation = {
+ "metric": {"required": True},
+ }
+
+ _attribute_map = {
+ "metric": {"key": "metric", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
+ }
+
+ def __init__(
+ self,
+ *,
+ metric: Union[str, "_models.GenerationTokenUsageMetric"],
+ threshold: Optional["_models.MonitoringThreshold"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword metric: [Required] Gets or sets the generation token usage metric to calculate.
+ Required.
+ Known values are: "TotalTokenCount" and "TotalTokenCountPerGroup".
+ :paramtype metric: str or ~azure.mgmt.machinelearningservices.models.GenerationTokenUsageMetric
+ :keyword threshold: Gets or sets the threshold value.
+ If null, a default value will be set depending on the selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ """
+ super().__init__(**kwargs)
+ self.metric = metric
+ self.threshold = threshold
+
+
+class GenerationTokenUsageSignal(MonitoringSignalBase):
+ """Generation token usage signal definition.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar notification_types: The current notification mode for this signal.
+ :vartype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
+ :vartype properties: dict[str, str]
+ :ivar signal_type: [Required] Specifies the type of signal to monitor. Required. Known values
+ are: "DataDrift", "PredictionDrift", "DataQuality", "FeatureAttributionDrift", "Custom",
+ "ModelPerformance", "GenerationSafetyQuality", and "GenerationTokenStatistics".
+ :vartype signal_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringSignalType
+ :ivar metric_thresholds: [Required] Gets or sets the metrics to calculate and the corresponding
+ thresholds. Required.
+ :vartype metric_thresholds:
+ list[~azure.mgmt.machinelearningservices.models.GenerationTokenUsageMetricThreshold]
+ :ivar production_data: Gets or sets the production data for computing metrics.
+ :vartype production_data:
+ list[~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase]
+ :ivar sampling_rate: [Required] The sample rate of the production data, should be greater than
+ 0 and at most 1. Required.
+ :vartype sampling_rate: float
+ """
+
+ _validation = {
+ "signal_type": {"required": True},
+ "metric_thresholds": {"required": True},
+ "sampling_rate": {"required": True},
+ }
+
+ _attribute_map = {
+ "notification_types": {"key": "notificationTypes", "type": "[str]"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "signal_type": {"key": "signalType", "type": "str"},
+ "metric_thresholds": {"key": "metricThresholds", "type": "[GenerationTokenUsageMetricThreshold]"},
+ "production_data": {"key": "productionData", "type": "[MonitoringInputDataBase]"},
+ "sampling_rate": {"key": "samplingRate", "type": "float"},
+ }
+
+ def __init__(
+ self,
+ *,
+ metric_thresholds: List["_models.GenerationTokenUsageMetricThreshold"],
+ sampling_rate: float,
+ notification_types: Optional[List[Union[str, "_models.MonitoringNotificationType"]]] = None,
+ properties: Optional[Dict[str, str]] = None,
+ production_data: Optional[List["_models.MonitoringInputDataBase"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword notification_types: The current notification mode for this signal.
+ :paramtype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
+ :paramtype properties: dict[str, str]
+ :keyword metric_thresholds: [Required] Gets or sets the metrics to calculate and the
+ corresponding thresholds. Required.
+ :paramtype metric_thresholds:
+ list[~azure.mgmt.machinelearningservices.models.GenerationTokenUsageMetricThreshold]
+ :keyword production_data: Gets or sets the production data for computing metrics.
+ :paramtype production_data:
+ list[~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase]
+ :keyword sampling_rate: [Required] The sample rate of the production data, should be greater
+ than 0 and at most 1. Required.
+ :paramtype sampling_rate: float
+ """
+ super().__init__(notification_types=notification_types, properties=properties, **kwargs)
+ self.signal_type: str = "GenerationTokenStatistics"
+ self.metric_thresholds = metric_thresholds
+ self.production_data = production_data
+ self.sampling_rate = sampling_rate
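+
+
+# Illustrative usage sketch (not part of the generated REST contract): a token-usage
+# monitoring signal with a single metric threshold. The 0.1 sampling rate is an arbitrary
+# example value; omitting ``threshold`` lets the service apply its default for the metric.
+def _example_generation_token_usage_signal() -> "GenerationTokenUsageSignal":
+    return GenerationTokenUsageSignal(
+        metric_thresholds=[GenerationTokenUsageMetricThreshold(metric="TotalTokenCount")],
+        sampling_rate=0.1,
+    )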
+
+
+class GridSamplingAlgorithm(SamplingAlgorithm):
+ """Defines a Sampling Algorithm that exhaustively generates every value combination in the space.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar sampling_algorithm_type: [Required] The algorithm used for generating hyperparameter
+ values, along with configuration properties. Required. Known values are: "Grid", "Random", and
+ "Bayesian".
+ :vartype sampling_algorithm_type: str or
+ ~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
+ """
+
+ _validation = {
+ "sampling_algorithm_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "sampling_algorithm_type": {"key": "samplingAlgorithmType", "type": "str"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.sampling_algorithm_type: str = "Grid"
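+
+
+# Illustrative sketch: grid sampling takes no extra configuration; it exhaustively
+# enumerates every combination of the discrete values in a sweep's search space, so it
+# is typically paired with choice-style search spaces only.
+def _example_grid_sampling_algorithm() -> "GridSamplingAlgorithm":
+    return GridSamplingAlgorithm()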
+
+
+class HdfsDatastore(DatastoreProperties):
+ """HdfsDatastore.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar credentials: [Required] Account credentials. Required.
+ :vartype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
+ :ivar datastore_type: [Required] Storage type backing the datastore. Required. Known values
+ are: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2", "AzureFile", "Hdfs", and "OneLake".
+ :vartype datastore_type: str or ~azure.mgmt.machinelearningservices.models.DatastoreType
+ :ivar intellectual_property: Intellectual Property details.
+ :vartype intellectual_property: ~azure.mgmt.machinelearningservices.models.IntellectualProperty
+ :ivar is_default: Readonly property to indicate if datastore is the workspace default
+ datastore.
+ :vartype is_default: bool
+ :ivar hdfs_server_certificate: The TLS cert of the HDFS server. Needs to be a base64 encoded
+ string. Required if "Https" protocol is selected.
+ :vartype hdfs_server_certificate: str
+ :ivar name_node_address: [Required] IP Address or DNS HostName. Required.
+ :vartype name_node_address: str
+ :ivar protocol: Protocol used to communicate with the storage account (Https/Http).
+ :vartype protocol: str
+ """
+
+ _validation = {
+ "credentials": {"required": True},
+ "datastore_type": {"required": True},
+ "is_default": {"readonly": True},
+ "name_node_address": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
+ _attribute_map = {
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "credentials": {"key": "credentials", "type": "DatastoreCredentials"},
+ "datastore_type": {"key": "datastoreType", "type": "str"},
+ "intellectual_property": {"key": "intellectualProperty", "type": "IntellectualProperty"},
+ "is_default": {"key": "isDefault", "type": "bool"},
+ "hdfs_server_certificate": {"key": "hdfsServerCertificate", "type": "str"},
+ "name_node_address": {"key": "nameNodeAddress", "type": "str"},
+ "protocol": {"key": "protocol", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ credentials: "_models.DatastoreCredentials",
+ name_node_address: str,
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ intellectual_property: Optional["_models.IntellectualProperty"] = None,
+ hdfs_server_certificate: Optional[str] = None,
+ protocol: str = "http",
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword credentials: [Required] Account credentials. Required.
+ :paramtype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
+ :keyword intellectual_property: Intellectual Property details.
+ :paramtype intellectual_property:
+ ~azure.mgmt.machinelearningservices.models.IntellectualProperty
+ :keyword hdfs_server_certificate: The TLS cert of the HDFS server. Needs to be a base64 encoded
+ string. Required if "Https" protocol is selected.
+ :paramtype hdfs_server_certificate: str
+ :keyword name_node_address: [Required] IP Address or DNS HostName. Required.
+ :paramtype name_node_address: str
+ :keyword protocol: Protocol used to communicate with the storage account (Https/Http).
+ :paramtype protocol: str
+ """
+ super().__init__(
+ description=description,
+ properties=properties,
+ tags=tags,
+ credentials=credentials,
+ intellectual_property=intellectual_property,
+ **kwargs
+ )
+ self.datastore_type: str = "Hdfs"
+ self.hdfs_server_certificate = hdfs_server_certificate
+ self.name_node_address = name_node_address
+ self.protocol = protocol
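+
+
+# Illustrative sketch (the host name is a placeholder, and the concrete
+# DatastoreCredentials subclass appropriate for the cluster's authentication setup is
+# supplied by the caller). When the "Https" protocol is selected, hdfs_server_certificate
+# must additionally carry the server's TLS certificate as a base64-encoded string.
+def _example_hdfs_datastore(credentials: "DatastoreCredentials") -> "HdfsDatastore":
+    return HdfsDatastore(
+        credentials=credentials,
+        name_node_address="namenode.contoso.com",
+    )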
+
+
+class HDInsightSchema(_serialization.Model):
+ """HDInsightSchema.
+
+ :ivar properties: HDInsight compute properties.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.HDInsightProperties
+ """
+
+ _attribute_map = {
+ "properties": {"key": "properties", "type": "HDInsightProperties"},
+ }
+
+ def __init__(self, *, properties: Optional["_models.HDInsightProperties"] = None, **kwargs: Any) -> None:
+ """
+ :keyword properties: HDInsight compute properties.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.HDInsightProperties
+ """
+ super().__init__(**kwargs)
+ self.properties = properties
+
+
+class HDInsight(Compute, HDInsightSchema): # pylint: disable=too-many-instance-attributes
+ """A HDInsight compute.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar properties: HDInsight compute properties.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.HDInsightProperties
+ :ivar compute_type: The type of compute. Required. Known values are: "AKS", "Kubernetes",
+ "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight", "Databricks",
+ "DataLakeAnalytics", and "SynapseSpark".
+ :vartype compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
+ :ivar compute_location: Location for the underlying compute.
+ :vartype compute_location: str
+ :ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
+ Updating, Provisioning, Succeeded, and Failed. Known values are: "Unknown", "Updating",
+ "Creating", "Deleting", "Succeeded", "Failed", and "Canceled".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.ProvisioningState
+ :ivar description: The description of the Machine Learning compute.
+ :vartype description: str
+ :ivar created_on: The time at which the compute was created.
+ :vartype created_on: ~datetime.datetime
+ :ivar modified_on: The time at which the compute was last modified.
+ :vartype modified_on: ~datetime.datetime
+ :ivar resource_id: ARM resource id of the underlying compute.
+ :vartype resource_id: str
+ :ivar provisioning_errors: Errors during provisioning.
+ :vartype provisioning_errors: list[~azure.mgmt.machinelearningservices.models.ErrorResponse]
+ :ivar is_attached_compute: Indicating whether the compute was provisioned by user and brought
+ from outside if true, or machine learning service provisioned it if false.
+ :vartype is_attached_compute: bool
+ :ivar disable_local_auth: Opt-out of local authentication and ensure customers can use only MSI
+ and AAD exclusively for authentication.
+ :vartype disable_local_auth: bool
+ """
+
+ _validation = {
+ "compute_type": {"required": True},
+ "provisioning_state": {"readonly": True},
+ "created_on": {"readonly": True},
+ "modified_on": {"readonly": True},
+ "provisioning_errors": {"readonly": True},
+ "is_attached_compute": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "properties": {"key": "properties", "type": "HDInsightProperties"},
+ "compute_type": {"key": "computeType", "type": "str"},
+ "compute_location": {"key": "computeLocation", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "description": {"key": "description", "type": "str"},
+ "created_on": {"key": "createdOn", "type": "iso-8601"},
+ "modified_on": {"key": "modifiedOn", "type": "iso-8601"},
+ "resource_id": {"key": "resourceId", "type": "str"},
+ "provisioning_errors": {"key": "provisioningErrors", "type": "[ErrorResponse]"},
+ "is_attached_compute": {"key": "isAttachedCompute", "type": "bool"},
+ "disable_local_auth": {"key": "disableLocalAuth", "type": "bool"},
+ }
+
+ def __init__(
+ self,
+ *,
+ properties: Optional["_models.HDInsightProperties"] = None,
+ compute_location: Optional[str] = None,
+ description: Optional[str] = None,
+ resource_id: Optional[str] = None,
+ disable_local_auth: Optional[bool] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword properties: HDInsight compute properties.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.HDInsightProperties
+ :keyword compute_location: Location for the underlying compute.
+ :paramtype compute_location: str
+ :keyword description: The description of the Machine Learning compute.
+ :paramtype description: str
+ :keyword resource_id: ARM resource id of the underlying compute.
+ :paramtype resource_id: str
+ :keyword disable_local_auth: Opt-out of local authentication and ensure customers can use only
+ MSI and AAD exclusively for authentication.
+ :paramtype disable_local_auth: bool
+ """
+ super().__init__(
+ compute_location=compute_location,
+ description=description,
+ resource_id=resource_id,
+ disable_local_auth=disable_local_auth,
+ properties=properties,
+ **kwargs
+ )
+ self.properties = properties
+ self.compute_type: str = "HDInsight"
+ self.compute_location = compute_location
+ self.provisioning_state = None
+ self.description = description
+ self.created_on = None
+ self.modified_on = None
+ self.resource_id = resource_id
+ self.provisioning_errors = None
+ self.is_attached_compute = None
+ self.disable_local_auth = disable_local_auth
+
+
+class HDInsightProperties(_serialization.Model):
+ """HDInsight compute properties.
+
+ :ivar ssh_port: Port open for ssh connections on the master node of the cluster.
+ :vartype ssh_port: int
+ :ivar address: Public IP address of the master node of the cluster.
+ :vartype address: str
+ :ivar administrator_account: Admin credentials for master node of the cluster.
+ :vartype administrator_account:
+ ~azure.mgmt.machinelearningservices.models.VirtualMachineSshCredentials
+ """
+
+ _attribute_map = {
+ "ssh_port": {"key": "sshPort", "type": "int"},
+ "address": {"key": "address", "type": "str"},
+ "administrator_account": {"key": "administratorAccount", "type": "VirtualMachineSshCredentials"},
+ }
+
+ def __init__(
+ self,
+ *,
+ ssh_port: Optional[int] = None,
+ address: Optional[str] = None,
+ administrator_account: Optional["_models.VirtualMachineSshCredentials"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword ssh_port: Port open for ssh connections on the master node of the cluster.
+ :paramtype ssh_port: int
+ :keyword address: Public IP address of the master node of the cluster.
+ :paramtype address: str
+ :keyword administrator_account: Admin credentials for master node of the cluster.
+ :paramtype administrator_account:
+ ~azure.mgmt.machinelearningservices.models.VirtualMachineSshCredentials
+ """
+ super().__init__(**kwargs)
+ self.ssh_port = ssh_port
+ self.address = address
+ self.administrator_account = administrator_account
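+
+
+# Illustrative sketch (placeholder address and resource id): attaching an existing
+# HDInsight cluster as a compute target by describing its master node and referencing
+# the cluster's ARM resource id; SSH credentials are supplied by the caller.
+def _example_hdinsight_compute(ssh_credentials: "VirtualMachineSshCredentials") -> "HDInsight":
+    return HDInsight(
+        properties=HDInsightProperties(
+            ssh_port=22,
+            address="cluster-ssh.azurehdinsight.net",
+            administrator_account=ssh_credentials,
+        ),
+        resource_id=(
+            "/subscriptions/<subscription-id>/resourceGroups/<rg>"
+            "/providers/Microsoft.HDInsight/clusters/<cluster-name>"
+        ),
+    )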
+
+
+class IdAssetReference(AssetReferenceBase):
+ """Reference to an asset via its ARM resource ID.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar reference_type: [Required] Specifies the type of asset reference. Required. Known values
+ are: "Id", "DataPath", and "OutputPath".
+ :vartype reference_type: str or ~azure.mgmt.machinelearningservices.models.ReferenceType
+ :ivar asset_id: [Required] ARM resource ID of the asset. Required.
+ :vartype asset_id: str
+ """
+
+ _validation = {
+ "reference_type": {"required": True},
+ "asset_id": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
+ _attribute_map = {
+ "reference_type": {"key": "referenceType", "type": "str"},
+ "asset_id": {"key": "assetId", "type": "str"},
+ }
+
+ def __init__(self, *, asset_id: str, **kwargs: Any) -> None:
+ """
+ :keyword asset_id: [Required] ARM resource ID of the asset. Required.
+ :paramtype asset_id: str
+ """
+ super().__init__(**kwargs)
+ self.reference_type: str = "Id"
+ self.asset_id = asset_id
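+
+
+# Illustrative sketch: referencing a registered model version by its ARM resource id
+# (the id below is a placeholder following the workspace asset id layout).
+def _example_id_asset_reference() -> "IdAssetReference":
+    return IdAssetReference(
+        asset_id=(
+            "/subscriptions/<subscription-id>/resourceGroups/<rg>/providers"
+            "/Microsoft.MachineLearningServices/workspaces/<workspace>/models/<model>/versions/1"
+        ),
+    )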
+
+
+class IdentityForCmk(_serialization.Model):
+ """Identity object used for encryption.
+
+ :ivar user_assigned_identity: UserAssignedIdentity to be used to fetch the encryption key from
+ keyVault.
+ :vartype user_assigned_identity: str
+ """
+
+ _attribute_map = {
+ "user_assigned_identity": {"key": "userAssignedIdentity", "type": "str"},
+ }
+
+ def __init__(self, *, user_assigned_identity: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword user_assigned_identity: UserAssignedIdentity to be used to fetch the encryption key
+ from keyVault.
+ :paramtype user_assigned_identity: str
+ """
+ super().__init__(**kwargs)
+ self.user_assigned_identity = user_assigned_identity
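+
+
+# Illustrative sketch: the encryption identity is referenced by the ARM resource id of a
+# user-assigned managed identity (placeholder values below).
+def _example_identity_for_cmk() -> "IdentityForCmk":
+    return IdentityForCmk(
+        user_assigned_identity=(
+            "/subscriptions/<subscription-id>/resourceGroups/<rg>/providers"
+            "/Microsoft.ManagedIdentity/userAssignedIdentities/<identity-name>"
+        ),
+    )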
+
+
+class IdleShutdownSetting(_serialization.Model):
+    """Stops the compute instance after a user-defined period of inactivity.
+
+ :ivar idle_time_before_shutdown: Time is defined in ISO8601 format. Minimum is 15 min, maximum
+ is 3 days.
+ :vartype idle_time_before_shutdown: str
+ """
+
+ _attribute_map = {
+ "idle_time_before_shutdown": {"key": "idleTimeBeforeShutdown", "type": "str"},
+ }
+
+ def __init__(self, *, idle_time_before_shutdown: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword idle_time_before_shutdown: Time is defined in ISO8601 format. Minimum is 15 min,
+ maximum is 3 days.
+ :paramtype idle_time_before_shutdown: str
+ """
+ super().__init__(**kwargs)
+ self.idle_time_before_shutdown = idle_time_before_shutdown
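+
+
+# Illustrative sketch: shut the compute instance down after 30 minutes of inactivity,
+# expressed as an ISO 8601 duration (allowed range per the description: 15 minutes to 3 days).
+def _example_idle_shutdown_setting() -> "IdleShutdownSetting":
+    return IdleShutdownSetting(idle_time_before_shutdown="PT30M")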
+
+
+class Image(_serialization.Model):
+ """Image.
+
+ :ivar additional_properties: Unmatched properties from the message are deserialized to this
+ collection.
+ :vartype additional_properties: dict[str, any]
+ :ivar type: Type of the image. Possible values are: docker - For docker images. azureml - For
+ AzureML images. Known values are: "docker" and "azureml".
+ :vartype type: str or ~azure.mgmt.machinelearningservices.models.ImageType
+ :ivar reference: Image reference URL.
+ :vartype reference: str
+ """
+
+ _attribute_map = {
+ "additional_properties": {"key": "", "type": "{object}"},
+ "type": {"key": "type", "type": "str"},
+ "reference": {"key": "reference", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ additional_properties: Optional[Dict[str, Any]] = None,
+ type: Union[str, "_models.ImageType"] = "docker",
+ reference: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ collection.
+ :paramtype additional_properties: dict[str, any]
+ :keyword type: Type of the image. Possible values are: docker - For docker images. azureml -
+ For AzureML images. Known values are: "docker" and "azureml".
+ :paramtype type: str or ~azure.mgmt.machinelearningservices.models.ImageType
+ :keyword reference: Image reference URL.
+ :paramtype reference: str
+ """
+ super().__init__(**kwargs)
+ self.additional_properties = additional_properties
+ self.type = type
+ self.reference = reference
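+
+
+# Illustrative sketch: pointing at a custom Docker image by reference URL (the registry
+# path is a placeholder); ``type`` defaults to "docker" and is shown only for clarity.
+def _example_image() -> "Image":
+    return Image(type="docker", reference="myregistry.azurecr.io/my-image:latest")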
+
+
+class ImageVertical(_serialization.Model):
+ """Abstract class for AutoML tasks that train image (computer vision) models -
+ such as Image Classification / Image Classification Multilabel / Image Object Detection / Image
+ Instance Segmentation.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :ivar validation_data: Validation data inputs.
+ :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :vartype validation_data_size: float
+ """
+
+ _validation = {
+ "limit_settings": {"required": True},
+ }
+
+ _attribute_map = {
+ "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
+ "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
+ "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
+ "validation_data_size": {"key": "validationDataSize", "type": "float"},
+ }
+
+ def __init__(
+ self,
+ *,
+ limit_settings: "_models.ImageLimitSettings",
+ sweep_settings: Optional["_models.ImageSweepSettings"] = None,
+ validation_data: Optional["_models.MLTableJobInput"] = None,
+ validation_data_size: Optional[float] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :keyword validation_data: Validation data inputs.
+ :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :paramtype validation_data_size: float
+ """
+ super().__init__(**kwargs)
+ self.limit_settings = limit_settings
+ self.sweep_settings = sweep_settings
+ self.validation_data = validation_data
+ self.validation_data_size = validation_data_size
+
+
+class ImageClassificationBase(ImageVertical):
+ """ImageClassificationBase.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :ivar validation_data: Validation data inputs.
+ :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :vartype validation_data_size: float
+ :ivar model_settings: Settings used for training the model.
+ :vartype model_settings:
+ ~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
+ :ivar search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :vartype search_space:
+ list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
+ """
+
+ _validation = {
+ "limit_settings": {"required": True},
+ }
+
+ _attribute_map = {
+ "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
+ "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
+ "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
+ "validation_data_size": {"key": "validationDataSize", "type": "float"},
+ "model_settings": {"key": "modelSettings", "type": "ImageModelSettingsClassification"},
+ "search_space": {"key": "searchSpace", "type": "[ImageModelDistributionSettingsClassification]"},
+ }
+
+ def __init__(
+ self,
+ *,
+ limit_settings: "_models.ImageLimitSettings",
+ sweep_settings: Optional["_models.ImageSweepSettings"] = None,
+ validation_data: Optional["_models.MLTableJobInput"] = None,
+ validation_data_size: Optional[float] = None,
+ model_settings: Optional["_models.ImageModelSettingsClassification"] = None,
+ search_space: Optional[List["_models.ImageModelDistributionSettingsClassification"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :keyword validation_data: Validation data inputs.
+ :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :paramtype validation_data_size: float
+ :keyword model_settings: Settings used for training the model.
+ :paramtype model_settings:
+ ~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
+ :keyword search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :paramtype search_space:
+ list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
+ """
+ super().__init__(
+ limit_settings=limit_settings,
+ sweep_settings=sweep_settings,
+ validation_data=validation_data,
+ validation_data_size=validation_data_size,
+ **kwargs
+ )
+ self.model_settings = model_settings
+ self.search_space = search_space
+
+
+class ImageClassification(ImageClassificationBase, AutoMLVertical): # pylint: disable=too-many-instance-attributes
+ """Image Classification. Multi-class image classification is used when an image is classified with
+ only a single label
+ from a set of classes - e.g. each image is classified as either an image of a 'cat' or a 'dog'
+ or a 'duck'.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
+ "Warning", "Error", and "Critical".
+ :vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
+ :ivar target_column_name: Target column name: This is prediction values column.
+ Also known as label column name in context of classification tasks.
+ :vartype target_column_name: str
+ :ivar task_type: [Required] Task type for AutoMLJob. Required. Known values are:
+ "Classification", "Regression", "Forecasting", "ImageClassification",
+ "ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
+ "TextClassification", "TextClassificationMultilabel", and "TextNER".
+ :vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
+ :ivar training_data: [Required] Training data input. Required.
+ :vartype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :ivar validation_data: Validation data inputs.
+ :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :vartype validation_data_size: float
+ :ivar model_settings: Settings used for training the model.
+ :vartype model_settings:
+ ~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
+ :ivar search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :vartype search_space:
+ list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
+ :ivar primary_metric: Primary metric to optimize for this task. Known values are:
+ "AUCWeighted", "Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted", and
+ "PrecisionScoreWeighted".
+ :vartype primary_metric: str or
+ ~azure.mgmt.machinelearningservices.models.ClassificationPrimaryMetrics
+ """
+
+ _validation = {
+ "task_type": {"required": True},
+ "training_data": {"required": True},
+ "limit_settings": {"required": True},
+ }
+
+ _attribute_map = {
+ "log_verbosity": {"key": "logVerbosity", "type": "str"},
+ "target_column_name": {"key": "targetColumnName", "type": "str"},
+ "task_type": {"key": "taskType", "type": "str"},
+ "training_data": {"key": "trainingData", "type": "MLTableJobInput"},
+ "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
+ "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
+ "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
+ "validation_data_size": {"key": "validationDataSize", "type": "float"},
+ "model_settings": {"key": "modelSettings", "type": "ImageModelSettingsClassification"},
+ "search_space": {"key": "searchSpace", "type": "[ImageModelDistributionSettingsClassification]"},
+ "primary_metric": {"key": "primaryMetric", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ training_data: "_models.MLTableJobInput",
+ limit_settings: "_models.ImageLimitSettings",
+ log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
+ target_column_name: Optional[str] = None,
+ sweep_settings: Optional["_models.ImageSweepSettings"] = None,
+ validation_data: Optional["_models.MLTableJobInput"] = None,
+ validation_data_size: Optional[float] = None,
+ model_settings: Optional["_models.ImageModelSettingsClassification"] = None,
+ search_space: Optional[List["_models.ImageModelDistributionSettingsClassification"]] = None,
+ primary_metric: Optional[Union[str, "_models.ClassificationPrimaryMetrics"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
+ "Warning", "Error", and "Critical".
+ :paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
+ :keyword target_column_name: Target column name: This is prediction values column.
+ Also known as label column name in context of classification tasks.
+ :paramtype target_column_name: str
+ :keyword training_data: [Required] Training data input. Required.
+ :paramtype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :keyword validation_data: Validation data inputs.
+ :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :paramtype validation_data_size: float
+ :keyword model_settings: Settings used for training the model.
+ :paramtype model_settings:
+ ~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
+ :keyword search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :paramtype search_space:
+ list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
+ :keyword primary_metric: Primary metric to optimize for this task. Known values are:
+ "AUCWeighted", "Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted", and
+ "PrecisionScoreWeighted".
+ :paramtype primary_metric: str or
+ ~azure.mgmt.machinelearningservices.models.ClassificationPrimaryMetrics
+ """
+ super().__init__(
+ limit_settings=limit_settings,
+ sweep_settings=sweep_settings,
+ validation_data=validation_data,
+ validation_data_size=validation_data_size,
+ model_settings=model_settings,
+ search_space=search_space,
+ log_verbosity=log_verbosity,
+ target_column_name=target_column_name,
+ training_data=training_data,
+ **kwargs
+ )
+ self.log_verbosity = log_verbosity
+ self.target_column_name = target_column_name
+ self.task_type: str = "ImageClassification"
+ self.training_data = training_data
+ self.primary_metric = primary_metric
+ self.limit_settings = limit_settings
+ self.sweep_settings = sweep_settings
+ self.validation_data = validation_data
+ self.validation_data_size = validation_data_size
+ self.model_settings = model_settings
+ self.search_space = search_space
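+
+
+# Illustrative sketch: a minimal multi-class image classification task. MLTableJobInput
+# and ImageLimitSettings are defined later in this module and are resolved when the
+# function is called; the training-data URI and trial limits are placeholder values.
+def _example_image_classification() -> "ImageClassification":
+    return ImageClassification(
+        training_data=MLTableJobInput(uri="azureml://datastores/workspaceblobstore/paths/training-mltable/"),
+        limit_settings=ImageLimitSettings(max_trials=10, max_concurrent_trials=2),
+        primary_metric="Accuracy",
+    )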
+
+
+class ImageClassificationMultilabel(
+ ImageClassificationBase, AutoMLVertical
+): # pylint: disable=too-many-instance-attributes
+ """Image Classification Multilabel. Multi-label image classification is used when an image could
+ have one or more labels
+ from a set of labels - e.g. an image could be labeled with both 'cat' and 'dog'.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
+ "Warning", "Error", and "Critical".
+ :vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
+ :ivar target_column_name: Target column name: This is prediction values column.
+ Also known as label column name in context of classification tasks.
+ :vartype target_column_name: str
+ :ivar task_type: [Required] Task type for AutoMLJob. Required. Known values are:
+ "Classification", "Regression", "Forecasting", "ImageClassification",
+ "ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
+ "TextClassification", "TextClassificationMultilabel", and "TextNER".
+ :vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
+ :ivar training_data: [Required] Training data input. Required.
+ :vartype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :ivar validation_data: Validation data inputs.
+ :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :vartype validation_data_size: float
+ :ivar model_settings: Settings used for training the model.
+ :vartype model_settings:
+ ~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
+ :ivar search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :vartype search_space:
+ list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
+ :ivar primary_metric: Primary metric to optimize for this task. Known values are:
+ "AUCWeighted", "Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted",
+ "PrecisionScoreWeighted", and "IOU".
+ :vartype primary_metric: str or
+ ~azure.mgmt.machinelearningservices.models.ClassificationMultilabelPrimaryMetrics
+ """
+
+ _validation = {
+ "task_type": {"required": True},
+ "training_data": {"required": True},
+ "limit_settings": {"required": True},
+ }
+
+ _attribute_map = {
+ "log_verbosity": {"key": "logVerbosity", "type": "str"},
+ "target_column_name": {"key": "targetColumnName", "type": "str"},
+ "task_type": {"key": "taskType", "type": "str"},
+ "training_data": {"key": "trainingData", "type": "MLTableJobInput"},
+ "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
+ "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
+ "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
+ "validation_data_size": {"key": "validationDataSize", "type": "float"},
+ "model_settings": {"key": "modelSettings", "type": "ImageModelSettingsClassification"},
+ "search_space": {"key": "searchSpace", "type": "[ImageModelDistributionSettingsClassification]"},
+ "primary_metric": {"key": "primaryMetric", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ training_data: "_models.MLTableJobInput",
+ limit_settings: "_models.ImageLimitSettings",
+ log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
+ target_column_name: Optional[str] = None,
+ sweep_settings: Optional["_models.ImageSweepSettings"] = None,
+ validation_data: Optional["_models.MLTableJobInput"] = None,
+ validation_data_size: Optional[float] = None,
+ model_settings: Optional["_models.ImageModelSettingsClassification"] = None,
+ search_space: Optional[List["_models.ImageModelDistributionSettingsClassification"]] = None,
+ primary_metric: Optional[Union[str, "_models.ClassificationMultilabelPrimaryMetrics"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
+ "Warning", "Error", and "Critical".
+ :paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
+ :keyword target_column_name: Target column name: This is prediction values column.
+ Also known as label column name in context of classification tasks.
+ :paramtype target_column_name: str
+ :keyword training_data: [Required] Training data input. Required.
+ :paramtype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :keyword validation_data: Validation data inputs.
+ :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :paramtype validation_data_size: float
+ :keyword model_settings: Settings used for training the model.
+ :paramtype model_settings:
+ ~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
+ :keyword search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :paramtype search_space:
+ list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
+ :keyword primary_metric: Primary metric to optimize for this task. Known values are:
+ "AUCWeighted", "Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted",
+ "PrecisionScoreWeighted", and "IOU".
+ :paramtype primary_metric: str or
+ ~azure.mgmt.machinelearningservices.models.ClassificationMultilabelPrimaryMetrics
+ """
+ super().__init__(
+ limit_settings=limit_settings,
+ sweep_settings=sweep_settings,
+ validation_data=validation_data,
+ validation_data_size=validation_data_size,
+ model_settings=model_settings,
+ search_space=search_space,
+ log_verbosity=log_verbosity,
+ target_column_name=target_column_name,
+ training_data=training_data,
+ **kwargs
+ )
+ self.log_verbosity = log_verbosity
+ self.target_column_name = target_column_name
+ self.task_type: str = "ImageClassificationMultilabel"
+ self.training_data = training_data
+ self.primary_metric = primary_metric
+ self.limit_settings = limit_settings
+ self.sweep_settings = sweep_settings
+ self.validation_data = validation_data
+ self.validation_data_size = validation_data_size
+ self.model_settings = model_settings
+ self.search_space = search_space
+
+
+class ImageObjectDetectionBase(ImageVertical):
+ """ImageObjectDetectionBase.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :ivar validation_data: Validation data inputs.
+ :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :vartype validation_data_size: float
+ :ivar model_settings: Settings used for training the model.
+ :vartype model_settings:
+ ~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
+ :ivar search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :vartype search_space:
+ list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
+ """
+
+ _validation = {
+ "limit_settings": {"required": True},
+ }
+
+ _attribute_map = {
+ "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
+ "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
+ "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
+ "validation_data_size": {"key": "validationDataSize", "type": "float"},
+ "model_settings": {"key": "modelSettings", "type": "ImageModelSettingsObjectDetection"},
+ "search_space": {"key": "searchSpace", "type": "[ImageModelDistributionSettingsObjectDetection]"},
+ }
+
+ def __init__(
+ self,
+ *,
+ limit_settings: "_models.ImageLimitSettings",
+ sweep_settings: Optional["_models.ImageSweepSettings"] = None,
+ validation_data: Optional["_models.MLTableJobInput"] = None,
+ validation_data_size: Optional[float] = None,
+ model_settings: Optional["_models.ImageModelSettingsObjectDetection"] = None,
+ search_space: Optional[List["_models.ImageModelDistributionSettingsObjectDetection"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :keyword validation_data: Validation data inputs.
+ :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :paramtype validation_data_size: float
+ :keyword model_settings: Settings used for training the model.
+ :paramtype model_settings:
+ ~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
+ :keyword search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :paramtype search_space:
+ list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
+ """
+ super().__init__(
+ limit_settings=limit_settings,
+ sweep_settings=sweep_settings,
+ validation_data=validation_data,
+ validation_data_size=validation_data_size,
+ **kwargs
+ )
+ self.model_settings = model_settings
+ self.search_space = search_space
+
+
+class ImageInstanceSegmentation(
+ ImageObjectDetectionBase, AutoMLVertical
+): # pylint: disable=too-many-instance-attributes
+ """Image Instance Segmentation. Instance segmentation is used to identify objects in an image at
+ the pixel level,
+ drawing a polygon around each object in the image.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
+ "Warning", "Error", and "Critical".
+ :vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
+ :ivar target_column_name: Target column name: This is prediction values column.
+ Also known as label column name in context of classification tasks.
+ :vartype target_column_name: str
+ :ivar task_type: [Required] Task type for AutoMLJob. Required. Known values are:
+ "Classification", "Regression", "Forecasting", "ImageClassification",
+ "ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
+ "TextClassification", "TextClassificationMultilabel", and "TextNER".
+ :vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
+ :ivar training_data: [Required] Training data input. Required.
+ :vartype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :ivar validation_data: Validation data inputs.
+ :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :vartype validation_data_size: float
+ :ivar model_settings: Settings used for training the model.
+ :vartype model_settings:
+ ~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
+ :ivar search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :vartype search_space:
+ list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
+ :ivar primary_metric: Primary metric to optimize for this task. "MeanAveragePrecision"
+ :vartype primary_metric: str or
+ ~azure.mgmt.machinelearningservices.models.InstanceSegmentationPrimaryMetrics
+ """
+
+ _validation = {
+ "task_type": {"required": True},
+ "training_data": {"required": True},
+ "limit_settings": {"required": True},
+ }
+
+ _attribute_map = {
+ "log_verbosity": {"key": "logVerbosity", "type": "str"},
+ "target_column_name": {"key": "targetColumnName", "type": "str"},
+ "task_type": {"key": "taskType", "type": "str"},
+ "training_data": {"key": "trainingData", "type": "MLTableJobInput"},
+ "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
+ "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
+ "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
+ "validation_data_size": {"key": "validationDataSize", "type": "float"},
+ "model_settings": {"key": "modelSettings", "type": "ImageModelSettingsObjectDetection"},
+ "search_space": {"key": "searchSpace", "type": "[ImageModelDistributionSettingsObjectDetection]"},
+ "primary_metric": {"key": "primaryMetric", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ training_data: "_models.MLTableJobInput",
+ limit_settings: "_models.ImageLimitSettings",
+ log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
+ target_column_name: Optional[str] = None,
+ sweep_settings: Optional["_models.ImageSweepSettings"] = None,
+ validation_data: Optional["_models.MLTableJobInput"] = None,
+ validation_data_size: Optional[float] = None,
+ model_settings: Optional["_models.ImageModelSettingsObjectDetection"] = None,
+ search_space: Optional[List["_models.ImageModelDistributionSettingsObjectDetection"]] = None,
+ primary_metric: Optional[Union[str, "_models.InstanceSegmentationPrimaryMetrics"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
+ "Warning", "Error", and "Critical".
+ :paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
+ :keyword target_column_name: Target column name: This is prediction values column.
+ Also known as label column name in context of classification tasks.
+ :paramtype target_column_name: str
+ :keyword training_data: [Required] Training data input. Required.
+ :paramtype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :keyword validation_data: Validation data inputs.
+ :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :paramtype validation_data_size: float
+ :keyword model_settings: Settings used for training the model.
+ :paramtype model_settings:
+ ~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
+ :keyword search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :paramtype search_space:
+ list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
+ :keyword primary_metric: Primary metric to optimize for this task. "MeanAveragePrecision"
+ :paramtype primary_metric: str or
+ ~azure.mgmt.machinelearningservices.models.InstanceSegmentationPrimaryMetrics
+ """
+ super().__init__(
+ limit_settings=limit_settings,
+ sweep_settings=sweep_settings,
+ validation_data=validation_data,
+ validation_data_size=validation_data_size,
+ model_settings=model_settings,
+ search_space=search_space,
+ log_verbosity=log_verbosity,
+ target_column_name=target_column_name,
+ training_data=training_data,
+ **kwargs
+ )
+ self.log_verbosity = log_verbosity
+ self.target_column_name = target_column_name
+ self.task_type: str = "ImageInstanceSegmentation"
+ self.training_data = training_data
+ self.primary_metric = primary_metric
+ self.limit_settings = limit_settings
+ self.sweep_settings = sweep_settings
+ self.validation_data = validation_data
+ self.validation_data_size = validation_data_size
+ self.model_settings = model_settings
+ self.search_space = search_space
+
+
+class ImageLimitSettings(_serialization.Model):
+ """Limit settings for the AutoML job.
+
+ :ivar max_concurrent_trials: Maximum number of concurrent AutoML iterations.
+ :vartype max_concurrent_trials: int
+ :ivar max_trials: Maximum number of AutoML iterations.
+ :vartype max_trials: int
+ :ivar timeout: AutoML job timeout.
+ :vartype timeout: ~datetime.timedelta
+ """
+
+ _attribute_map = {
+ "max_concurrent_trials": {"key": "maxConcurrentTrials", "type": "int"},
+ "max_trials": {"key": "maxTrials", "type": "int"},
+ "timeout": {"key": "timeout", "type": "duration"},
+ }
+
+    def __init__(
+        self,
+        *,
+        max_concurrent_trials: int = 1,
+        max_trials: int = 1,
+        timeout: datetime.timedelta = datetime.timedelta(days=7),
+        **kwargs: Any
+    ) -> None:
+ """
+ :keyword max_concurrent_trials: Maximum number of concurrent AutoML iterations.
+ :paramtype max_concurrent_trials: int
+ :keyword max_trials: Maximum number of AutoML iterations.
+ :paramtype max_trials: int
+ :keyword timeout: AutoML job timeout.
+ :paramtype timeout: ~datetime.timedelta
+ """
+ super().__init__(**kwargs)
+ self.max_concurrent_trials = max_concurrent_trials
+ self.max_trials = max_trials
+ self.timeout = timeout
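+
+
+# Illustrative sketch: cap an image AutoML sweep at 20 trials, 4 at a time, with a
+# six-hour overall timeout supplied as a ``datetime.timedelta``.
+def _example_image_limit_settings() -> "ImageLimitSettings":
+    return ImageLimitSettings(
+        max_trials=20,
+        max_concurrent_trials=4,
+        timeout=datetime.timedelta(hours=6),
+    )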
+
+
+class ImageMetadata(_serialization.Model):
+ """Returns metadata about the operating system image for this compute instance.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar current_image_version: Specifies the current operating system image version this compute
+ instance is running on.
+ :vartype current_image_version: str
+ :ivar latest_image_version: Specifies the latest available operating system image version.
+ :vartype latest_image_version: str
+ :ivar is_latest_os_image_version: Specifies whether this compute instance is running on the
+ latest operating system image.
+ :vartype is_latest_os_image_version: bool
+ :ivar os_patching_status: Metadata about the os patching.
+ :vartype os_patching_status: ~azure.mgmt.machinelearningservices.models.OsPatchingStatus
+ """
+
+ _validation = {
+ "os_patching_status": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "current_image_version": {"key": "currentImageVersion", "type": "str"},
+ "latest_image_version": {"key": "latestImageVersion", "type": "str"},
+ "is_latest_os_image_version": {"key": "isLatestOsImageVersion", "type": "bool"},
+ "os_patching_status": {"key": "osPatchingStatus", "type": "OsPatchingStatus"},
+ }
+
+ def __init__(
+ self,
+ *,
+ current_image_version: Optional[str] = None,
+ latest_image_version: Optional[str] = None,
+ is_latest_os_image_version: Optional[bool] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword current_image_version: Specifies the current operating system image version this
+ compute instance is running on.
+ :paramtype current_image_version: str
+ :keyword latest_image_version: Specifies the latest available operating system image version.
+ :paramtype latest_image_version: str
+ :keyword is_latest_os_image_version: Specifies whether this compute instance is running on the
+ latest operating system image.
+ :paramtype is_latest_os_image_version: bool
+ """
+ super().__init__(**kwargs)
+ self.current_image_version = current_image_version
+ self.latest_image_version = latest_image_version
+ self.is_latest_os_image_version = is_latest_os_image_version
+ self.os_patching_status = None
+
+
+class ImageModelDistributionSettings(_serialization.Model): # pylint: disable=too-many-instance-attributes
+ """Distribution expressions to sweep over values of model settings.
+
+    Some examples are::
+
+       ModelName = "choice('seresnext', 'resnest50')";
+       LearningRate = "uniform(0.001, 0.01)";
+       LayersToFreeze = "choice(0, 2)";
+
+    All distributions can be specified as distribution_name(min, max) or choice(val1, val2, ...,
+    valn), where the distribution name can be: uniform, quniform, loguniform, etc.
+ For more details on how to compose distribution expressions please check the documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters
+ For more information on the available settings please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+
+ :ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+ :vartype ams_gradient: str
+ :ivar augmentations: Settings for using Augmentations.
+ :vartype augmentations: str
+ :ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+ [0, 1].
+ :vartype beta1: str
+ :ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+ [0, 1].
+ :vartype beta2: str
+    :ivar distributed: Whether to use distributed training.
+ :vartype distributed: str
+ :ivar early_stopping: Enable early stopping logic during training.
+ :vartype early_stopping: str
+ :ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
+ primary metric improvement
+ is tracked for early stopping. Must be a positive integer.
+ :vartype early_stopping_delay: str
+ :ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
+ primary metric improvement before
+ the run is stopped. Must be a positive integer.
+ :vartype early_stopping_patience: str
+ :ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
+ :vartype enable_onnx_normalization: str
+ :ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
+ be a positive integer.
+ :vartype evaluation_frequency: str
+ :ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
+ "GradAccumulationStep" steps without
+ updating the model weights while accumulating the gradients of those steps, and then using
+ the accumulated gradients to compute the weight updates. Must be a positive integer.
+ :vartype gradient_accumulation_step: str
+ :ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
+ For instance, passing 2 as value for 'seresnext' means
+ freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
+ please
+ see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :vartype layers_to_freeze: str
+ :ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+ :vartype learning_rate: str
+ :ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+ 'step'.
+ :vartype learning_rate_scheduler: str
+ :ivar model_name: Name of the model to use for training.
+ For more information on the available models please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :vartype model_name: str
+ :ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
+ :vartype momentum: str
+ :ivar nesterov: Enable nesterov when optimizer is 'sgd'.
+ :vartype nesterov: str
+ :ivar number_of_epochs: Number of training epochs. Must be a positive integer.
+ :vartype number_of_epochs: str
+ :ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
+ :vartype number_of_workers: str
+ :ivar optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
+ :vartype optimizer: str
+ :ivar random_seed: Random seed to be used when using deterministic training.
+ :vartype random_seed: str
+ :ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
+ the range [0, 1].
+ :vartype step_lr_gamma: str
+ :ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
+ positive integer.
+ :vartype step_lr_step_size: str
+ :ivar training_batch_size: Training batch size. Must be a positive integer.
+ :vartype training_batch_size: str
+ :ivar validation_batch_size: Validation batch size. Must be a positive integer.
+ :vartype validation_batch_size: str
+ :ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+ 'warmup_cosine'. Must be a float in the range [0, 1].
+ :vartype warmup_cosine_lr_cycles: str
+ :ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+ 'warmup_cosine'. Must be a positive integer.
+ :vartype warmup_cosine_lr_warmup_epochs: str
+ :ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
+ a float in the range [0, 1].
+ :vartype weight_decay: str
+ """
+
+ _attribute_map = {
+ "ams_gradient": {"key": "amsGradient", "type": "str"},
+ "augmentations": {"key": "augmentations", "type": "str"},
+ "beta1": {"key": "beta1", "type": "str"},
+ "beta2": {"key": "beta2", "type": "str"},
+ "distributed": {"key": "distributed", "type": "str"},
+ "early_stopping": {"key": "earlyStopping", "type": "str"},
+ "early_stopping_delay": {"key": "earlyStoppingDelay", "type": "str"},
+ "early_stopping_patience": {"key": "earlyStoppingPatience", "type": "str"},
+ "enable_onnx_normalization": {"key": "enableOnnxNormalization", "type": "str"},
+ "evaluation_frequency": {"key": "evaluationFrequency", "type": "str"},
+ "gradient_accumulation_step": {"key": "gradientAccumulationStep", "type": "str"},
+ "layers_to_freeze": {"key": "layersToFreeze", "type": "str"},
+ "learning_rate": {"key": "learningRate", "type": "str"},
+ "learning_rate_scheduler": {"key": "learningRateScheduler", "type": "str"},
+ "model_name": {"key": "modelName", "type": "str"},
+ "momentum": {"key": "momentum", "type": "str"},
+ "nesterov": {"key": "nesterov", "type": "str"},
+ "number_of_epochs": {"key": "numberOfEpochs", "type": "str"},
+ "number_of_workers": {"key": "numberOfWorkers", "type": "str"},
+ "optimizer": {"key": "optimizer", "type": "str"},
+ "random_seed": {"key": "randomSeed", "type": "str"},
+ "step_lr_gamma": {"key": "stepLRGamma", "type": "str"},
+ "step_lr_step_size": {"key": "stepLRStepSize", "type": "str"},
+ "training_batch_size": {"key": "trainingBatchSize", "type": "str"},
+ "validation_batch_size": {"key": "validationBatchSize", "type": "str"},
+ "warmup_cosine_lr_cycles": {"key": "warmupCosineLRCycles", "type": "str"},
+ "warmup_cosine_lr_warmup_epochs": {"key": "warmupCosineLRWarmupEpochs", "type": "str"},
+ "weight_decay": {"key": "weightDecay", "type": "str"},
+ }
+
+ def __init__( # pylint: disable=too-many-locals
+ self,
+ *,
+ ams_gradient: Optional[str] = None,
+ augmentations: Optional[str] = None,
+ beta1: Optional[str] = None,
+ beta2: Optional[str] = None,
+ distributed: Optional[str] = None,
+ early_stopping: Optional[str] = None,
+ early_stopping_delay: Optional[str] = None,
+ early_stopping_patience: Optional[str] = None,
+ enable_onnx_normalization: Optional[str] = None,
+ evaluation_frequency: Optional[str] = None,
+ gradient_accumulation_step: Optional[str] = None,
+ layers_to_freeze: Optional[str] = None,
+ learning_rate: Optional[str] = None,
+ learning_rate_scheduler: Optional[str] = None,
+ model_name: Optional[str] = None,
+ momentum: Optional[str] = None,
+ nesterov: Optional[str] = None,
+ number_of_epochs: Optional[str] = None,
+ number_of_workers: Optional[str] = None,
+ optimizer: Optional[str] = None,
+ random_seed: Optional[str] = None,
+ step_lr_gamma: Optional[str] = None,
+ step_lr_step_size: Optional[str] = None,
+ training_batch_size: Optional[str] = None,
+ validation_batch_size: Optional[str] = None,
+ warmup_cosine_lr_cycles: Optional[str] = None,
+ warmup_cosine_lr_warmup_epochs: Optional[str] = None,
+ weight_decay: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+ :paramtype ams_gradient: str
+ :keyword augmentations: Settings for using Augmentations.
+ :paramtype augmentations: str
+ :keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
+ range [0, 1].
+ :paramtype beta1: str
+ :keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
+ range [0, 1].
+ :paramtype beta2: str
+ :keyword distributed: Whether to use distributed training.
+ :paramtype distributed: str
+ :keyword early_stopping: Enable early stopping logic during training.
+ :paramtype early_stopping: str
+ :keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
+ before primary metric improvement
+ is tracked for early stopping. Must be a positive integer.
+ :paramtype early_stopping_delay: str
+ :keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
+ primary metric improvement before
+ the run is stopped. Must be a positive integer.
+ :paramtype early_stopping_patience: str
+ :keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
+ :paramtype enable_onnx_normalization: str
+ :keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
+ Must be a positive integer.
+ :paramtype evaluation_frequency: str
+ :keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
+ "GradAccumulationStep" steps without
+ updating the model weights while accumulating the gradients of those steps, and then using
+ the accumulated gradients to compute the weight updates. Must be a positive integer.
+ :paramtype gradient_accumulation_step: str
+ :keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
+ integer.
+ For instance, passing 2 as value for 'seresnext' means
+ freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
+ please
+ see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :paramtype layers_to_freeze: str
+ :keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+ :paramtype learning_rate: str
+ :keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+ 'step'.
+ :paramtype learning_rate_scheduler: str
+ :keyword model_name: Name of the model to use for training.
+ For more information on the available models please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :paramtype model_name: str
+ :keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
+ 1].
+ :paramtype momentum: str
+ :keyword nesterov: Enable nesterov when optimizer is 'sgd'.
+ :paramtype nesterov: str
+ :keyword number_of_epochs: Number of training epochs. Must be a positive integer.
+ :paramtype number_of_epochs: str
+ :keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
+ :paramtype number_of_workers: str
+ :keyword optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
+ :paramtype optimizer: str
+ :keyword random_seed: Random seed to be used when using deterministic training.
+ :paramtype random_seed: str
+ :keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
+ in the range [0, 1].
+ :paramtype step_lr_gamma: str
+ :keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
+ a positive integer.
+ :paramtype step_lr_step_size: str
+ :keyword training_batch_size: Training batch size. Must be a positive integer.
+ :paramtype training_batch_size: str
+ :keyword validation_batch_size: Validation batch size. Must be a positive integer.
+ :paramtype validation_batch_size: str
+ :keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+ 'warmup_cosine'. Must be a float in the range [0, 1].
+ :paramtype warmup_cosine_lr_cycles: str
+ :keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+ 'warmup_cosine'. Must be a positive integer.
+ :paramtype warmup_cosine_lr_warmup_epochs: str
+ :keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
+ be a float in the range [0, 1].
+ :paramtype weight_decay: str
+ """
+ super().__init__(**kwargs)
+ self.ams_gradient = ams_gradient
+ self.augmentations = augmentations
+ self.beta1 = beta1
+ self.beta2 = beta2
+ self.distributed = distributed
+ self.early_stopping = early_stopping
+ self.early_stopping_delay = early_stopping_delay
+ self.early_stopping_patience = early_stopping_patience
+ self.enable_onnx_normalization = enable_onnx_normalization
+ self.evaluation_frequency = evaluation_frequency
+ self.gradient_accumulation_step = gradient_accumulation_step
+ self.layers_to_freeze = layers_to_freeze
+ self.learning_rate = learning_rate
+ self.learning_rate_scheduler = learning_rate_scheduler
+ self.model_name = model_name
+ self.momentum = momentum
+ self.nesterov = nesterov
+ self.number_of_epochs = number_of_epochs
+ self.number_of_workers = number_of_workers
+ self.optimizer = optimizer
+ self.random_seed = random_seed
+ self.step_lr_gamma = step_lr_gamma
+ self.step_lr_step_size = step_lr_step_size
+ self.training_batch_size = training_batch_size
+ self.validation_batch_size = validation_batch_size
+ self.warmup_cosine_lr_cycles = warmup_cosine_lr_cycles
+ self.warmup_cosine_lr_warmup_epochs = warmup_cosine_lr_warmup_epochs
+ self.weight_decay = weight_decay
+
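+# Illustrative usage sketch (editorial example, not part of the autorest output): the
+# distribution settings above take string-valued sweep expressions such as choice(...) and
+# uniform(...), as described in the docstring and the hyperparameter-tuning documentation
+# linked there. The concrete values below are placeholders.
+_example_sweep_space = ImageModelDistributionSettings(
+ model_name="choice('seresnext', 'resnest50')",
+ learning_rate="uniform(0.001, 0.01)",
+ layers_to_freeze="choice(0, 2)",
+)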
+
+class ImageModelDistributionSettingsClassification(
+ ImageModelDistributionSettings
+): # pylint: disable=too-many-instance-attributes
+ """Distribution expressions to sweep over values of model settings.
+
+ Some examples are:
+
+ ModelName = "choice('seresnext', 'resnest50')";
+ LearningRate = "uniform(0.001, 0.01)";
+ LayersToFreeze = "choice(0, 2)";
+
+ For more details on how to compose distribution expressions please check the documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters
+ For more information on the available settings please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+
+ :ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+ :vartype ams_gradient: str
+ :ivar augmentations: Settings for using Augmentations.
+ :vartype augmentations: str
+ :ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+ [0, 1].
+ :vartype beta1: str
+ :ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+ [0, 1].
+ :vartype beta2: str
+ :ivar distributed: Whether to use distributed training.
+ :vartype distributed: str
+ :ivar early_stopping: Enable early stopping logic during training.
+ :vartype early_stopping: str
+ :ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
+ primary metric improvement
+ is tracked for early stopping. Must be a positive integer.
+ :vartype early_stopping_delay: str
+ :ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
+ primary metric improvement before
+ the run is stopped. Must be a positive integer.
+ :vartype early_stopping_patience: str
+ :ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
+ :vartype enable_onnx_normalization: str
+ :ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
+ be a positive integer.
+ :vartype evaluation_frequency: str
+ :ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
+ "GradAccumulationStep" steps without
+ updating the model weights while accumulating the gradients of those steps, and then using
+ the accumulated gradients to compute the weight updates. Must be a positive integer.
+ :vartype gradient_accumulation_step: str
+ :ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
+ For instance, passing 2 as value for 'seresnext' means
+ freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
+ please
+ see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :vartype layers_to_freeze: str
+ :ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+ :vartype learning_rate: str
+ :ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+ 'step'.
+ :vartype learning_rate_scheduler: str
+ :ivar model_name: Name of the model to use for training.
+ For more information on the available models please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :vartype model_name: str
+ :ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
+ :vartype momentum: str
+ :ivar nesterov: Enable nesterov when optimizer is 'sgd'.
+ :vartype nesterov: str
+ :ivar number_of_epochs: Number of training epochs. Must be a positive integer.
+ :vartype number_of_epochs: str
+ :ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
+ :vartype number_of_workers: str
+ :ivar optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
+ :vartype optimizer: str
+ :ivar random_seed: Random seed to be used when using deterministic training.
+ :vartype random_seed: str
+ :ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
+ the range [0, 1].
+ :vartype step_lr_gamma: str
+ :ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
+ positive integer.
+ :vartype step_lr_step_size: str
+ :ivar training_batch_size: Training batch size. Must be a positive integer.
+ :vartype training_batch_size: str
+ :ivar validation_batch_size: Validation batch size. Must be a positive integer.
+ :vartype validation_batch_size: str
+ :ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+ 'warmup_cosine'. Must be a float in the range [0, 1].
+ :vartype warmup_cosine_lr_cycles: str
+ :ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+ 'warmup_cosine'. Must be a positive integer.
+ :vartype warmup_cosine_lr_warmup_epochs: str
+ :ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
+ a float in the range [0, 1].
+ :vartype weight_decay: str
+ :ivar training_crop_size: Image crop size that is input to the neural network for the training
+ dataset. Must be a positive integer.
+ :vartype training_crop_size: str
+ :ivar validation_crop_size: Image crop size that is input to the neural network for the
+ validation dataset. Must be a positive integer.
+ :vartype validation_crop_size: str
+ :ivar validation_resize_size: Image size to which to resize before cropping for validation
+ dataset. Must be a positive integer.
+ :vartype validation_resize_size: str
+ :ivar weighted_loss: Weighted loss. The accepted values are 0 for no weighted loss,
+ 1 for weighted loss with sqrt(class_weights), and 2 for weighted loss with class_weights.
+ Must be 0, 1, or 2.
+ :vartype weighted_loss: str
+ """
+
+ _attribute_map = {
+ "ams_gradient": {"key": "amsGradient", "type": "str"},
+ "augmentations": {"key": "augmentations", "type": "str"},
+ "beta1": {"key": "beta1", "type": "str"},
+ "beta2": {"key": "beta2", "type": "str"},
+ "distributed": {"key": "distributed", "type": "str"},
+ "early_stopping": {"key": "earlyStopping", "type": "str"},
+ "early_stopping_delay": {"key": "earlyStoppingDelay", "type": "str"},
+ "early_stopping_patience": {"key": "earlyStoppingPatience", "type": "str"},
+ "enable_onnx_normalization": {"key": "enableOnnxNormalization", "type": "str"},
+ "evaluation_frequency": {"key": "evaluationFrequency", "type": "str"},
+ "gradient_accumulation_step": {"key": "gradientAccumulationStep", "type": "str"},
+ "layers_to_freeze": {"key": "layersToFreeze", "type": "str"},
+ "learning_rate": {"key": "learningRate", "type": "str"},
+ "learning_rate_scheduler": {"key": "learningRateScheduler", "type": "str"},
+ "model_name": {"key": "modelName", "type": "str"},
+ "momentum": {"key": "momentum", "type": "str"},
+ "nesterov": {"key": "nesterov", "type": "str"},
+ "number_of_epochs": {"key": "numberOfEpochs", "type": "str"},
+ "number_of_workers": {"key": "numberOfWorkers", "type": "str"},
+ "optimizer": {"key": "optimizer", "type": "str"},
+ "random_seed": {"key": "randomSeed", "type": "str"},
+ "step_lr_gamma": {"key": "stepLRGamma", "type": "str"},
+ "step_lr_step_size": {"key": "stepLRStepSize", "type": "str"},
+ "training_batch_size": {"key": "trainingBatchSize", "type": "str"},
+ "validation_batch_size": {"key": "validationBatchSize", "type": "str"},
+ "warmup_cosine_lr_cycles": {"key": "warmupCosineLRCycles", "type": "str"},
+ "warmup_cosine_lr_warmup_epochs": {"key": "warmupCosineLRWarmupEpochs", "type": "str"},
+ "weight_decay": {"key": "weightDecay", "type": "str"},
+ "training_crop_size": {"key": "trainingCropSize", "type": "str"},
+ "validation_crop_size": {"key": "validationCropSize", "type": "str"},
+ "validation_resize_size": {"key": "validationResizeSize", "type": "str"},
+ "weighted_loss": {"key": "weightedLoss", "type": "str"},
+ }
+
+ def __init__( # pylint: disable=too-many-locals
+ self,
+ *,
+ ams_gradient: Optional[str] = None,
+ augmentations: Optional[str] = None,
+ beta1: Optional[str] = None,
+ beta2: Optional[str] = None,
+ distributed: Optional[str] = None,
+ early_stopping: Optional[str] = None,
+ early_stopping_delay: Optional[str] = None,
+ early_stopping_patience: Optional[str] = None,
+ enable_onnx_normalization: Optional[str] = None,
+ evaluation_frequency: Optional[str] = None,
+ gradient_accumulation_step: Optional[str] = None,
+ layers_to_freeze: Optional[str] = None,
+ learning_rate: Optional[str] = None,
+ learning_rate_scheduler: Optional[str] = None,
+ model_name: Optional[str] = None,
+ momentum: Optional[str] = None,
+ nesterov: Optional[str] = None,
+ number_of_epochs: Optional[str] = None,
+ number_of_workers: Optional[str] = None,
+ optimizer: Optional[str] = None,
+ random_seed: Optional[str] = None,
+ step_lr_gamma: Optional[str] = None,
+ step_lr_step_size: Optional[str] = None,
+ training_batch_size: Optional[str] = None,
+ validation_batch_size: Optional[str] = None,
+ warmup_cosine_lr_cycles: Optional[str] = None,
+ warmup_cosine_lr_warmup_epochs: Optional[str] = None,
+ weight_decay: Optional[str] = None,
+ training_crop_size: Optional[str] = None,
+ validation_crop_size: Optional[str] = None,
+ validation_resize_size: Optional[str] = None,
+ weighted_loss: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+ :paramtype ams_gradient: str
+ :keyword augmentations: Settings for using Augmentations.
+ :paramtype augmentations: str
+ :keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
+ range [0, 1].
+ :paramtype beta1: str
+ :keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
+ range [0, 1].
+ :paramtype beta2: str
+ :keyword distributed: Whether to use distributed training.
+ :paramtype distributed: str
+ :keyword early_stopping: Enable early stopping logic during training.
+ :paramtype early_stopping: str
+ :keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
+ before primary metric improvement
+ is tracked for early stopping. Must be a positive integer.
+ :paramtype early_stopping_delay: str
+ :keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
+ primary metric improvement before
+ the run is stopped. Must be a positive integer.
+ :paramtype early_stopping_patience: str
+ :keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
+ :paramtype enable_onnx_normalization: str
+ :keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
+ Must be a positive integer.
+ :paramtype evaluation_frequency: str
+ :keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
+ "GradAccumulationStep" steps without
+ updating the model weights while accumulating the gradients of those steps, and then using
+ the accumulated gradients to compute the weight updates. Must be a positive integer.
+ :paramtype gradient_accumulation_step: str
+ :keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
+ integer.
+ For instance, passing 2 as value for 'seresnext' means
+ freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
+ please
+ see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :paramtype layers_to_freeze: str
+ :keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+ :paramtype learning_rate: str
+ :keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+ 'step'.
+ :paramtype learning_rate_scheduler: str
+ :keyword model_name: Name of the model to use for training.
+ For more information on the available models please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :paramtype model_name: str
+ :keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
+ 1].
+ :paramtype momentum: str
+ :keyword nesterov: Enable nesterov when optimizer is 'sgd'.
+ :paramtype nesterov: str
+ :keyword number_of_epochs: Number of training epochs. Must be a positive integer.
+ :paramtype number_of_epochs: str
+ :keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
+ :paramtype number_of_workers: str
+ :keyword optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
+ :paramtype optimizer: str
+ :keyword random_seed: Random seed to be used when using deterministic training.
+ :paramtype random_seed: str
+ :keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
+ in the range [0, 1].
+ :paramtype step_lr_gamma: str
+ :keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
+ a positive integer.
+ :paramtype step_lr_step_size: str
+ :keyword training_batch_size: Training batch size. Must be a positive integer.
+ :paramtype training_batch_size: str
+ :keyword validation_batch_size: Validation batch size. Must be a positive integer.
+ :paramtype validation_batch_size: str
+ :keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+ 'warmup_cosine'. Must be a float in the range [0, 1].
+ :paramtype warmup_cosine_lr_cycles: str
+ :keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+ 'warmup_cosine'. Must be a positive integer.
+ :paramtype warmup_cosine_lr_warmup_epochs: str
+ :keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
+ be a float in the range [0, 1].
+ :paramtype weight_decay: str
+ :keyword training_crop_size: Image crop size that is input to the neural network for the
+ training dataset. Must be a positive integer.
+ :paramtype training_crop_size: str
+ :keyword validation_crop_size: Image crop size that is input to the neural network for the
+ validation dataset. Must be a positive integer.
+ :paramtype validation_crop_size: str
+ :keyword validation_resize_size: Image size to which to resize before cropping for validation
+ dataset. Must be a positive integer.
+ :paramtype validation_resize_size: str
+ :keyword weighted_loss: Weighted loss. The accepted values are 0 for no weighted loss,
+ 1 for weighted loss with sqrt(class_weights), and 2 for weighted loss with class_weights.
+ Must be 0, 1, or 2.
+ :paramtype weighted_loss: str
+ """
+ super().__init__(
+ ams_gradient=ams_gradient,
+ augmentations=augmentations,
+ beta1=beta1,
+ beta2=beta2,
+ distributed=distributed,
+ early_stopping=early_stopping,
+ early_stopping_delay=early_stopping_delay,
+ early_stopping_patience=early_stopping_patience,
+ enable_onnx_normalization=enable_onnx_normalization,
+ evaluation_frequency=evaluation_frequency,
+ gradient_accumulation_step=gradient_accumulation_step,
+ layers_to_freeze=layers_to_freeze,
+ learning_rate=learning_rate,
+ learning_rate_scheduler=learning_rate_scheduler,
+ model_name=model_name,
+ momentum=momentum,
+ nesterov=nesterov,
+ number_of_epochs=number_of_epochs,
+ number_of_workers=number_of_workers,
+ optimizer=optimizer,
+ random_seed=random_seed,
+ step_lr_gamma=step_lr_gamma,
+ step_lr_step_size=step_lr_step_size,
+ training_batch_size=training_batch_size,
+ validation_batch_size=validation_batch_size,
+ warmup_cosine_lr_cycles=warmup_cosine_lr_cycles,
+ warmup_cosine_lr_warmup_epochs=warmup_cosine_lr_warmup_epochs,
+ weight_decay=weight_decay,
+ **kwargs
+ )
+ self.training_crop_size = training_crop_size
+ self.validation_crop_size = validation_crop_size
+ self.validation_resize_size = validation_resize_size
+ self.weighted_loss = weighted_loss
+
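+# Illustrative usage sketch (editorial example, not generated code): a classification sweep
+# space combining the shared settings with the classification-specific crop and weighted-loss
+# knobs described above. Every value is a distribution-expression string; the specific numbers
+# are placeholders chosen for illustration only.
+_example_classification_sweep = ImageModelDistributionSettingsClassification(
+ model_name="choice('seresnext', 'resnest50')",
+ learning_rate="uniform(0.001, 0.01)",
+ training_crop_size="choice(224, 256)",
+ weighted_loss="choice(0, 2)",
+)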
+
+class ImageModelDistributionSettingsObjectDetection(
+ ImageModelDistributionSettings
+): # pylint: disable=too-many-instance-attributes
+ """Distribution expressions to sweep over values of model settings.
+
+ Some examples are:
+
+ ModelName = "choice('seresnext', 'resnest50')";
+ LearningRate = "uniform(0.001, 0.01)";
+ LayersToFreeze = "choice(0, 2)";
+
+ For more details on how to compose distribution expressions please check the documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters
+ For more information on the available settings please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+
+ :ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+ :vartype ams_gradient: str
+ :ivar augmentations: Settings for using Augmentations.
+ :vartype augmentations: str
+ :ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+ [0, 1].
+ :vartype beta1: str
+ :ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+ [0, 1].
+ :vartype beta2: str
+ :ivar distributed: Whether to use distributed training.
+ :vartype distributed: str
+ :ivar early_stopping: Enable early stopping logic during training.
+ :vartype early_stopping: str
+ :ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
+ primary metric improvement
+ is tracked for early stopping. Must be a positive integer.
+ :vartype early_stopping_delay: str
+ :ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
+ primary metric improvement before
+ the run is stopped. Must be a positive integer.
+ :vartype early_stopping_patience: str
+ :ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
+ :vartype enable_onnx_normalization: str
+ :ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
+ be a positive integer.
+ :vartype evaluation_frequency: str
+ :ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
+ "GradAccumulationStep" steps without
+ updating the model weights while accumulating the gradients of those steps, and then using
+ the accumulated gradients to compute the weight updates. Must be a positive integer.
+ :vartype gradient_accumulation_step: str
+ :ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
+ For instance, passing 2 as value for 'seresnext' means
+ freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
+ please
+ see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :vartype layers_to_freeze: str
+ :ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+ :vartype learning_rate: str
+ :ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+ 'step'.
+ :vartype learning_rate_scheduler: str
+ :ivar model_name: Name of the model to use for training.
+ For more information on the available models please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :vartype model_name: str
+ :ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
+ :vartype momentum: str
+ :ivar nesterov: Enable nesterov when optimizer is 'sgd'.
+ :vartype nesterov: str
+ :ivar number_of_epochs: Number of training epochs. Must be a positive integer.
+ :vartype number_of_epochs: str
+ :ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
+ :vartype number_of_workers: str
+ :ivar optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
+ :vartype optimizer: str
+ :ivar random_seed: Random seed to be used when using deterministic training.
+ :vartype random_seed: str
+ :ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
+ the range [0, 1].
+ :vartype step_lr_gamma: str
+ :ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
+ positive integer.
+ :vartype step_lr_step_size: str
+ :ivar training_batch_size: Training batch size. Must be a positive integer.
+ :vartype training_batch_size: str
+ :ivar validation_batch_size: Validation batch size. Must be a positive integer.
+ :vartype validation_batch_size: str
+ :ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+ 'warmup_cosine'. Must be a float in the range [0, 1].
+ :vartype warmup_cosine_lr_cycles: str
+ :ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+ 'warmup_cosine'. Must be a positive integer.
+ :vartype warmup_cosine_lr_warmup_epochs: str
+ :ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
+ a float in the range [0, 1].
+ :vartype weight_decay: str
+ :ivar box_detections_per_image: Maximum number of detections per image, for all classes. Must
+ be a positive integer.
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :vartype box_detections_per_image: str
+ :ivar box_score_threshold: During inference, only return proposals with a classification score
+ greater than
+ BoxScoreThreshold. Must be a float in the range [0, 1].
+ :vartype box_score_threshold: str
+ :ivar image_size: Image size for training and validation. Must be a positive integer.
+ Note: The training run may get into CUDA OOM if the size is too big.
+ Note: This setting is only supported for the 'yolov5' algorithm.
+ :vartype image_size: str
+ :ivar max_size: Maximum size of the image to be rescaled before feeding it to the backbone.
+ Must be a positive integer. Note: The training run may get into CUDA OOM if the size is too big.
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :vartype max_size: str
+ :ivar min_size: Minimum size of the image to be rescaled before feeding it to the backbone.
+ Must be a positive integer. Note: The training run may get into CUDA OOM if the size is too big.
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :vartype min_size: str
+ :ivar model_size: Model size. Must be 'small', 'medium', 'large', or 'xlarge'.
+ Note: The training run may get into CUDA OOM if the model size is too big.
+ Note: This setting is only supported for the 'yolov5' algorithm.
+ :vartype model_size: str
+ :ivar multi_scale: Enable multi-scale image by varying image size by +/- 50%.
+ Note: The training run may get into CUDA OOM if there is not enough GPU memory.
+ Note: This setting is only supported for the 'yolov5' algorithm.
+ :vartype multi_scale: str
+ :ivar nms_iou_threshold: IOU threshold used during inference in NMS post processing. Must be a
+ float in the range [0, 1].
+ :vartype nms_iou_threshold: str
+ :ivar tile_grid_size: The grid size to use for tiling each image. Note: TileGridSize must not
+ be None to enable small object detection logic. A string containing two integers in mxn format.
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :vartype tile_grid_size: str
+ :ivar tile_overlap_ratio: Overlap ratio between adjacent tiles in each dimension. Must be a
+ float in the range [0, 1).
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :vartype tile_overlap_ratio: str
+ :ivar tile_predictions_nms_threshold: The IOU threshold to use to perform NMS while merging
+ predictions from tiles and image.
+ Used in validation/inference. Must be a float in the range [0, 1].
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ NMS: Non-maximum suppression.
+ :vartype tile_predictions_nms_threshold: str
+ :ivar validation_iou_threshold: IOU threshold to use when computing validation metrics. Must be
+ a float in the range [0, 1].
+ :vartype validation_iou_threshold: str
+ :ivar validation_metric_type: Metric computation method to use for validation metrics. Must be
+ 'none', 'coco', 'voc', or 'coco_voc'.
+ :vartype validation_metric_type: str
+ """
+
+ _attribute_map = {
+ "ams_gradient": {"key": "amsGradient", "type": "str"},
+ "augmentations": {"key": "augmentations", "type": "str"},
+ "beta1": {"key": "beta1", "type": "str"},
+ "beta2": {"key": "beta2", "type": "str"},
+ "distributed": {"key": "distributed", "type": "str"},
+ "early_stopping": {"key": "earlyStopping", "type": "str"},
+ "early_stopping_delay": {"key": "earlyStoppingDelay", "type": "str"},
+ "early_stopping_patience": {"key": "earlyStoppingPatience", "type": "str"},
+ "enable_onnx_normalization": {"key": "enableOnnxNormalization", "type": "str"},
+ "evaluation_frequency": {"key": "evaluationFrequency", "type": "str"},
+ "gradient_accumulation_step": {"key": "gradientAccumulationStep", "type": "str"},
+ "layers_to_freeze": {"key": "layersToFreeze", "type": "str"},
+ "learning_rate": {"key": "learningRate", "type": "str"},
+ "learning_rate_scheduler": {"key": "learningRateScheduler", "type": "str"},
+ "model_name": {"key": "modelName", "type": "str"},
+ "momentum": {"key": "momentum", "type": "str"},
+ "nesterov": {"key": "nesterov", "type": "str"},
+ "number_of_epochs": {"key": "numberOfEpochs", "type": "str"},
+ "number_of_workers": {"key": "numberOfWorkers", "type": "str"},
+ "optimizer": {"key": "optimizer", "type": "str"},
+ "random_seed": {"key": "randomSeed", "type": "str"},
+ "step_lr_gamma": {"key": "stepLRGamma", "type": "str"},
+ "step_lr_step_size": {"key": "stepLRStepSize", "type": "str"},
+ "training_batch_size": {"key": "trainingBatchSize", "type": "str"},
+ "validation_batch_size": {"key": "validationBatchSize", "type": "str"},
+ "warmup_cosine_lr_cycles": {"key": "warmupCosineLRCycles", "type": "str"},
+ "warmup_cosine_lr_warmup_epochs": {"key": "warmupCosineLRWarmupEpochs", "type": "str"},
+ "weight_decay": {"key": "weightDecay", "type": "str"},
+ "box_detections_per_image": {"key": "boxDetectionsPerImage", "type": "str"},
+ "box_score_threshold": {"key": "boxScoreThreshold", "type": "str"},
+ "image_size": {"key": "imageSize", "type": "str"},
+ "max_size": {"key": "maxSize", "type": "str"},
+ "min_size": {"key": "minSize", "type": "str"},
+ "model_size": {"key": "modelSize", "type": "str"},
+ "multi_scale": {"key": "multiScale", "type": "str"},
+ "nms_iou_threshold": {"key": "nmsIouThreshold", "type": "str"},
+ "tile_grid_size": {"key": "tileGridSize", "type": "str"},
+ "tile_overlap_ratio": {"key": "tileOverlapRatio", "type": "str"},
+ "tile_predictions_nms_threshold": {"key": "tilePredictionsNmsThreshold", "type": "str"},
+ "validation_iou_threshold": {"key": "validationIouThreshold", "type": "str"},
+ "validation_metric_type": {"key": "validationMetricType", "type": "str"},
+ }
+
+ def __init__( # pylint: disable=too-many-locals
+ self,
+ *,
+ ams_gradient: Optional[str] = None,
+ augmentations: Optional[str] = None,
+ beta1: Optional[str] = None,
+ beta2: Optional[str] = None,
+ distributed: Optional[str] = None,
+ early_stopping: Optional[str] = None,
+ early_stopping_delay: Optional[str] = None,
+ early_stopping_patience: Optional[str] = None,
+ enable_onnx_normalization: Optional[str] = None,
+ evaluation_frequency: Optional[str] = None,
+ gradient_accumulation_step: Optional[str] = None,
+ layers_to_freeze: Optional[str] = None,
+ learning_rate: Optional[str] = None,
+ learning_rate_scheduler: Optional[str] = None,
+ model_name: Optional[str] = None,
+ momentum: Optional[str] = None,
+ nesterov: Optional[str] = None,
+ number_of_epochs: Optional[str] = None,
+ number_of_workers: Optional[str] = None,
+ optimizer: Optional[str] = None,
+ random_seed: Optional[str] = None,
+ step_lr_gamma: Optional[str] = None,
+ step_lr_step_size: Optional[str] = None,
+ training_batch_size: Optional[str] = None,
+ validation_batch_size: Optional[str] = None,
+ warmup_cosine_lr_cycles: Optional[str] = None,
+ warmup_cosine_lr_warmup_epochs: Optional[str] = None,
+ weight_decay: Optional[str] = None,
+ box_detections_per_image: Optional[str] = None,
+ box_score_threshold: Optional[str] = None,
+ image_size: Optional[str] = None,
+ max_size: Optional[str] = None,
+ min_size: Optional[str] = None,
+ model_size: Optional[str] = None,
+ multi_scale: Optional[str] = None,
+ nms_iou_threshold: Optional[str] = None,
+ tile_grid_size: Optional[str] = None,
+ tile_overlap_ratio: Optional[str] = None,
+ tile_predictions_nms_threshold: Optional[str] = None,
+ validation_iou_threshold: Optional[str] = None,
+ validation_metric_type: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+ :paramtype ams_gradient: str
+ :keyword augmentations: Settings for using Augmentations.
+ :paramtype augmentations: str
+ :keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
+ range [0, 1].
+ :paramtype beta1: str
+ :keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
+ range [0, 1].
+ :paramtype beta2: str
+ :keyword distributed: Whether to use distributed training.
+ :paramtype distributed: str
+ :keyword early_stopping: Enable early stopping logic during training.
+ :paramtype early_stopping: str
+ :keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
+ before primary metric improvement
+ is tracked for early stopping. Must be a positive integer.
+ :paramtype early_stopping_delay: str
+ :keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
+ primary metric improvement before
+ the run is stopped. Must be a positive integer.
:paramtype early_stopping_patience: str
:keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
- :paramtype enable_onnx_normalization: str
+ :paramtype enable_onnx_normalization: str
+ :keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
+ Must be a positive integer.
+ :paramtype evaluation_frequency: str
+ :keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
+ "GradAccumulationStep" steps without
+ updating the model weights while accumulating the gradients of those steps, and then using
+ the accumulated gradients to compute the weight updates. Must be a positive integer.
+ :paramtype gradient_accumulation_step: str
+ :keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
+ integer.
+ For instance, passing 2 as value for 'seresnext' means
+ freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
+ please
+ see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :paramtype layers_to_freeze: str
+ :keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+ :paramtype learning_rate: str
+ :keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+ 'step'.
+ :paramtype learning_rate_scheduler: str
+ :keyword model_name: Name of the model to use for training.
+ For more information on the available models please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :paramtype model_name: str
+ :keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
+ 1].
+ :paramtype momentum: str
+ :keyword nesterov: Enable nesterov when optimizer is 'sgd'.
+ :paramtype nesterov: str
+ :keyword number_of_epochs: Number of training epochs. Must be a positive integer.
+ :paramtype number_of_epochs: str
+ :keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
+ :paramtype number_of_workers: str
+ :keyword optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
+ :paramtype optimizer: str
+ :keyword random_seed: Random seed to be used when using deterministic training.
+ :paramtype random_seed: str
+ :keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
+ in the range [0, 1].
+ :paramtype step_lr_gamma: str
+ :keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
+ a positive integer.
+ :paramtype step_lr_step_size: str
+ :keyword training_batch_size: Training batch size. Must be a positive integer.
+ :paramtype training_batch_size: str
+ :keyword validation_batch_size: Validation batch size. Must be a positive integer.
+ :paramtype validation_batch_size: str
+ :keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+ 'warmup_cosine'. Must be a float in the range [0, 1].
+ :paramtype warmup_cosine_lr_cycles: str
+ :keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+ 'warmup_cosine'. Must be a positive integer.
+ :paramtype warmup_cosine_lr_warmup_epochs: str
+ :keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
+ be a float in the range [0, 1].
+ :paramtype weight_decay: str
+ :keyword box_detections_per_image: Maximum number of detections per image, for all classes.
+ Must be a positive integer.
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :paramtype box_detections_per_image: str
+ :keyword box_score_threshold: During inference, only return proposals with a classification
+ score greater than
+ BoxScoreThreshold. Must be a float in the range [0, 1].
+ :paramtype box_score_threshold: str
+ :keyword image_size: Image size for training and validation. Must be a positive integer.
+ Note: The training run may get into CUDA OOM if the size is too big.
+ Note: This setting is only supported for the 'yolov5' algorithm.
+ :paramtype image_size: str
+ :keyword max_size: Maximum size of the image to be rescaled before feeding it to the backbone.
+ Must be a positive integer. Note: The training run may get into CUDA OOM if the size is too big.
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :paramtype max_size: str
+ :keyword min_size: Minimum size of the image to be rescaled before feeding it to the backbone.
+ Must be a positive integer. Note: The training run may get into CUDA OOM if the size is too big.
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :paramtype min_size: str
+ :keyword model_size: Model size. Must be 'small', 'medium', 'large', or 'xlarge'.
+ Note: The training run may get into CUDA OOM if the model size is too big.
+ Note: This setting is only supported for the 'yolov5' algorithm.
+ :paramtype model_size: str
+ :keyword multi_scale: Enable multi-scale image by varying image size by +/- 50%.
+ Note: The training run may get into CUDA OOM if there is not enough GPU memory.
+ Note: This setting is only supported for the 'yolov5' algorithm.
+ :paramtype multi_scale: str
+ :keyword nms_iou_threshold: IOU threshold used during inference in NMS post processing. Must be
+ a float in the range [0, 1].
+ :paramtype nms_iou_threshold: str
+ :keyword tile_grid_size: The grid size to use for tiling each image. Note: TileGridSize must
+ not be None to enable small object detection logic. A string containing two integers in mxn
+ format.
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :paramtype tile_grid_size: str
+ :keyword tile_overlap_ratio: Overlap ratio between adjacent tiles in each dimension. Must be a
+ float in the range [0, 1).
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :paramtype tile_overlap_ratio: str
+ :keyword tile_predictions_nms_threshold: The IOU threshold to use to perform NMS while merging
+ predictions from tiles and image.
+ Used in validation/inference. Must be a float in the range [0, 1].
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ NMS: Non-maximum suppression.
+ :paramtype tile_predictions_nms_threshold: str
+ :keyword validation_iou_threshold: IOU threshold to use when computing validation metrics. Must
+ be a float in the range [0, 1].
+ :paramtype validation_iou_threshold: str
+ :keyword validation_metric_type: Metric computation method to use for validation metrics. Must
+ be 'none', 'coco', 'voc', or 'coco_voc'.
+ :paramtype validation_metric_type: str
+ """
+ super().__init__(
+ ams_gradient=ams_gradient,
+ augmentations=augmentations,
+ beta1=beta1,
+ beta2=beta2,
+ distributed=distributed,
+ early_stopping=early_stopping,
+ early_stopping_delay=early_stopping_delay,
+ early_stopping_patience=early_stopping_patience,
+ enable_onnx_normalization=enable_onnx_normalization,
+ evaluation_frequency=evaluation_frequency,
+ gradient_accumulation_step=gradient_accumulation_step,
+ layers_to_freeze=layers_to_freeze,
+ learning_rate=learning_rate,
+ learning_rate_scheduler=learning_rate_scheduler,
+ model_name=model_name,
+ momentum=momentum,
+ nesterov=nesterov,
+ number_of_epochs=number_of_epochs,
+ number_of_workers=number_of_workers,
+ optimizer=optimizer,
+ random_seed=random_seed,
+ step_lr_gamma=step_lr_gamma,
+ step_lr_step_size=step_lr_step_size,
+ training_batch_size=training_batch_size,
+ validation_batch_size=validation_batch_size,
+ warmup_cosine_lr_cycles=warmup_cosine_lr_cycles,
+ warmup_cosine_lr_warmup_epochs=warmup_cosine_lr_warmup_epochs,
+ weight_decay=weight_decay,
+ **kwargs
+ )
+ self.box_detections_per_image = box_detections_per_image
+ self.box_score_threshold = box_score_threshold
+ self.image_size = image_size
+ self.max_size = max_size
+ self.min_size = min_size
+ self.model_size = model_size
+ self.multi_scale = multi_scale
+ self.nms_iou_threshold = nms_iou_threshold
+ self.tile_grid_size = tile_grid_size
+ self.tile_overlap_ratio = tile_overlap_ratio
+ self.tile_predictions_nms_threshold = tile_predictions_nms_threshold
+ self.validation_iou_threshold = validation_iou_threshold
+ self.validation_metric_type = validation_metric_type
+
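+# Illustrative usage sketch (editorial example, not generated code): an object-detection sweep
+# space. Per the notes above, model_size and image_size apply only to the 'yolov5' algorithm,
+# so the example restricts model_name accordingly; all values are placeholders.
+_example_detection_sweep = ImageModelDistributionSettingsObjectDetection(
+ model_name="choice('yolov5')",
+ learning_rate="uniform(0.0001, 0.01)",
+ model_size="choice('small', 'medium')",
+ image_size="choice(640, 704)",
+)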
+
+class ImageModelSettings(_serialization.Model): # pylint: disable=too-many-instance-attributes
+ """Settings used for training the model.
+ For more information on the available settings please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+
+ :ivar advanced_settings: Settings for advanced scenarios.
+ :vartype advanced_settings: str
+ :ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+ :vartype ams_gradient: bool
+ :ivar augmentations: Settings for using Augmentations.
+ :vartype augmentations: str
+ :ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+ [0, 1].
+ :vartype beta1: float
+ :ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+ [0, 1].
+ :vartype beta2: float
+ :ivar checkpoint_frequency: Frequency to store model checkpoints. Must be a positive integer.
+ :vartype checkpoint_frequency: int
+ :ivar checkpoint_model: The pretrained checkpoint model for incremental training.
+ :vartype checkpoint_model: ~azure.mgmt.machinelearningservices.models.MLFlowModelJobInput
+ :ivar checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
+ incremental training.
+ :vartype checkpoint_run_id: str
+ :ivar distributed: Whether to use distributed training.
+ :vartype distributed: bool
+ :ivar early_stopping: Enable early stopping logic during training.
+ :vartype early_stopping: bool
+ :ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
+ primary metric improvement
+ is tracked for early stopping. Must be a positive integer.
+ :vartype early_stopping_delay: int
+ :ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
+ primary metric improvement before
+ the run is stopped. Must be a positive integer.
+ :vartype early_stopping_patience: int
+ :ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
+ :vartype enable_onnx_normalization: bool
+ :ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
+ be a positive integer.
+ :vartype evaluation_frequency: int
+ :ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
+ "GradAccumulationStep" steps without
+ updating the model weights while accumulating the gradients of those steps, and then using
+ the accumulated gradients to compute the weight updates. Must be a positive integer.
+ :vartype gradient_accumulation_step: int
+ :ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
+ For instance, passing 2 as value for 'seresnext' means
+ freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
+ please
+ see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :vartype layers_to_freeze: int
+ :ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+ :vartype learning_rate: float
+ :ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+ 'step'. Known values are: "None", "WarmupCosine", and "Step".
+ :vartype learning_rate_scheduler: str or
+ ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
+ :ivar model_name: Name of the model to use for training.
+ For more information on the available models please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :vartype model_name: str
+ :ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
+ :vartype momentum: float
+ :ivar nesterov: Enable nesterov when optimizer is 'sgd'.
+ :vartype nesterov: bool
+ :ivar number_of_epochs: Number of training epochs. Must be a positive integer.
+ :vartype number_of_epochs: int
+ :ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
+ :vartype number_of_workers: int
+ :ivar optimizer: Type of optimizer. Known values are: "None", "Sgd", "Adam", and "Adamw".
+ :vartype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
+ :ivar random_seed: Random seed to be used when using deterministic training.
+ :vartype random_seed: int
+ :ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
+ the range [0, 1].
+ :vartype step_lr_gamma: float
+ :ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
+ positive integer.
+ :vartype step_lr_step_size: int
+ :ivar training_batch_size: Training batch size. Must be a positive integer.
+ :vartype training_batch_size: int
+ :ivar validation_batch_size: Validation batch size. Must be a positive integer.
+ :vartype validation_batch_size: int
+ :ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+ 'warmup_cosine'. Must be a float in the range [0, 1].
+ :vartype warmup_cosine_lr_cycles: float
+ :ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+ 'warmup_cosine'. Must be a positive integer.
+ :vartype warmup_cosine_lr_warmup_epochs: int
+ :ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
+ a float in the range [0, 1].
+ :vartype weight_decay: float
+ """
+
+ _attribute_map = {
+ "advanced_settings": {"key": "advancedSettings", "type": "str"},
+ "ams_gradient": {"key": "amsGradient", "type": "bool"},
+ "augmentations": {"key": "augmentations", "type": "str"},
+ "beta1": {"key": "beta1", "type": "float"},
+ "beta2": {"key": "beta2", "type": "float"},
+ "checkpoint_frequency": {"key": "checkpointFrequency", "type": "int"},
+ "checkpoint_model": {"key": "checkpointModel", "type": "MLFlowModelJobInput"},
+ "checkpoint_run_id": {"key": "checkpointRunId", "type": "str"},
+ "distributed": {"key": "distributed", "type": "bool"},
+ "early_stopping": {"key": "earlyStopping", "type": "bool"},
+ "early_stopping_delay": {"key": "earlyStoppingDelay", "type": "int"},
+ "early_stopping_patience": {"key": "earlyStoppingPatience", "type": "int"},
+ "enable_onnx_normalization": {"key": "enableOnnxNormalization", "type": "bool"},
+ "evaluation_frequency": {"key": "evaluationFrequency", "type": "int"},
+ "gradient_accumulation_step": {"key": "gradientAccumulationStep", "type": "int"},
+ "layers_to_freeze": {"key": "layersToFreeze", "type": "int"},
+ "learning_rate": {"key": "learningRate", "type": "float"},
+ "learning_rate_scheduler": {"key": "learningRateScheduler", "type": "str"},
+ "model_name": {"key": "modelName", "type": "str"},
+ "momentum": {"key": "momentum", "type": "float"},
+ "nesterov": {"key": "nesterov", "type": "bool"},
+ "number_of_epochs": {"key": "numberOfEpochs", "type": "int"},
+ "number_of_workers": {"key": "numberOfWorkers", "type": "int"},
+ "optimizer": {"key": "optimizer", "type": "str"},
+ "random_seed": {"key": "randomSeed", "type": "int"},
+ "step_lr_gamma": {"key": "stepLRGamma", "type": "float"},
+ "step_lr_step_size": {"key": "stepLRStepSize", "type": "int"},
+ "training_batch_size": {"key": "trainingBatchSize", "type": "int"},
+ "validation_batch_size": {"key": "validationBatchSize", "type": "int"},
+ "warmup_cosine_lr_cycles": {"key": "warmupCosineLRCycles", "type": "float"},
+ "warmup_cosine_lr_warmup_epochs": {"key": "warmupCosineLRWarmupEpochs", "type": "int"},
+ "weight_decay": {"key": "weightDecay", "type": "float"},
+ }
+
+ def __init__( # pylint: disable=too-many-locals
+ self,
+ *,
+ advanced_settings: Optional[str] = None,
+ ams_gradient: Optional[bool] = None,
+ augmentations: Optional[str] = None,
+ beta1: Optional[float] = None,
+ beta2: Optional[float] = None,
+ checkpoint_frequency: Optional[int] = None,
+ checkpoint_model: Optional["_models.MLFlowModelJobInput"] = None,
+ checkpoint_run_id: Optional[str] = None,
+ distributed: Optional[bool] = None,
+ early_stopping: Optional[bool] = None,
+ early_stopping_delay: Optional[int] = None,
+ early_stopping_patience: Optional[int] = None,
+ enable_onnx_normalization: Optional[bool] = None,
+ evaluation_frequency: Optional[int] = None,
+ gradient_accumulation_step: Optional[int] = None,
+ layers_to_freeze: Optional[int] = None,
+ learning_rate: Optional[float] = None,
+ learning_rate_scheduler: Optional[Union[str, "_models.LearningRateScheduler"]] = None,
+ model_name: Optional[str] = None,
+ momentum: Optional[float] = None,
+ nesterov: Optional[bool] = None,
+ number_of_epochs: Optional[int] = None,
+ number_of_workers: Optional[int] = None,
+ optimizer: Optional[Union[str, "_models.StochasticOptimizer"]] = None,
+ random_seed: Optional[int] = None,
+ step_lr_gamma: Optional[float] = None,
+ step_lr_step_size: Optional[int] = None,
+ training_batch_size: Optional[int] = None,
+ validation_batch_size: Optional[int] = None,
+ warmup_cosine_lr_cycles: Optional[float] = None,
+ warmup_cosine_lr_warmup_epochs: Optional[int] = None,
+ weight_decay: Optional[float] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword advanced_settings: Settings for advanced scenarios.
+ :paramtype advanced_settings: str
+ :keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+ :paramtype ams_gradient: bool
+ :keyword augmentations: Settings for using Augmentations.
+ :paramtype augmentations: str
+ :keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
+ range [0, 1].
+ :paramtype beta1: float
+ :keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
+ range [0, 1].
+ :paramtype beta2: float
+ :keyword checkpoint_frequency: Frequency to store model checkpoints. Must be a positive
+ integer.
+ :paramtype checkpoint_frequency: int
+ :keyword checkpoint_model: The pretrained checkpoint model for incremental training.
+ :paramtype checkpoint_model: ~azure.mgmt.machinelearningservices.models.MLFlowModelJobInput
+ :keyword checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
+ incremental training.
+ :paramtype checkpoint_run_id: str
+ :keyword distributed: Whether to use distributed training.
+ :paramtype distributed: bool
+ :keyword early_stopping: Enable early stopping logic during training.
+ :paramtype early_stopping: bool
+ :keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
+ before primary metric improvement
+ is tracked for early stopping. Must be a positive integer.
+ :paramtype early_stopping_delay: int
+ :keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
+ primary metric improvement before
+ the run is stopped. Must be a positive integer.
+ :paramtype early_stopping_patience: int
+ :keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
+ :paramtype enable_onnx_normalization: bool
+ :keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
+ Must be a positive integer.
+ :paramtype evaluation_frequency: int
+ :keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
+ "GradAccumulationStep" steps without
+ updating the model weights while accumulating the gradients of those steps, and then using
+ the accumulated gradients to compute the weight updates. Must be a positive integer.
+ :paramtype gradient_accumulation_step: int
+ :keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
+ integer.
+ For instance, passing 2 as value for 'seresnext' means
+ freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
+ please
+ see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :paramtype layers_to_freeze: int
+ :keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+ :paramtype learning_rate: float
+ :keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+ 'step'. Known values are: "None", "WarmupCosine", and "Step".
+ :paramtype learning_rate_scheduler: str or
+ ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
+ :keyword model_name: Name of the model to use for training.
+ For more information on the available models please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :paramtype model_name: str
+ :keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
+ 1].
+ :paramtype momentum: float
+ :keyword nesterov: Enable nesterov when optimizer is 'sgd'.
+ :paramtype nesterov: bool
+ :keyword number_of_epochs: Number of training epochs. Must be a positive integer.
+ :paramtype number_of_epochs: int
+ :keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
+ :paramtype number_of_workers: int
+ :keyword optimizer: Type of optimizer. Known values are: "None", "Sgd", "Adam", and "Adamw".
+ :paramtype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
+ :keyword random_seed: Random seed to be used when using deterministic training.
+ :paramtype random_seed: int
+ :keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
+ in the range [0, 1].
+ :paramtype step_lr_gamma: float
+ :keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
+ a positive integer.
+ :paramtype step_lr_step_size: int
+ :keyword training_batch_size: Training batch size. Must be a positive integer.
+ :paramtype training_batch_size: int
+ :keyword validation_batch_size: Validation batch size. Must be a positive integer.
+ :paramtype validation_batch_size: int
+ :keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+ 'warmup_cosine'. Must be a float in the range [0, 1].
+ :paramtype warmup_cosine_lr_cycles: float
+ :keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+ 'warmup_cosine'. Must be a positive integer.
+ :paramtype warmup_cosine_lr_warmup_epochs: int
+ :keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
+ be a float in the range [0, 1].
+ :paramtype weight_decay: float
+ """
+ super().__init__(**kwargs)
+ self.advanced_settings = advanced_settings
+ self.ams_gradient = ams_gradient
+ self.augmentations = augmentations
+ self.beta1 = beta1
+ self.beta2 = beta2
+ self.checkpoint_frequency = checkpoint_frequency
+ self.checkpoint_model = checkpoint_model
+ self.checkpoint_run_id = checkpoint_run_id
+ self.distributed = distributed
+ self.early_stopping = early_stopping
+ self.early_stopping_delay = early_stopping_delay
+ self.early_stopping_patience = early_stopping_patience
+ self.enable_onnx_normalization = enable_onnx_normalization
+ self.evaluation_frequency = evaluation_frequency
+ self.gradient_accumulation_step = gradient_accumulation_step
+ self.layers_to_freeze = layers_to_freeze
+ self.learning_rate = learning_rate
+ self.learning_rate_scheduler = learning_rate_scheduler
+ self.model_name = model_name
+ self.momentum = momentum
+ self.nesterov = nesterov
+ self.number_of_epochs = number_of_epochs
+ self.number_of_workers = number_of_workers
+ self.optimizer = optimizer
+ self.random_seed = random_seed
+ self.step_lr_gamma = step_lr_gamma
+ self.step_lr_step_size = step_lr_step_size
+ self.training_batch_size = training_batch_size
+ self.validation_batch_size = validation_batch_size
+ self.warmup_cosine_lr_cycles = warmup_cosine_lr_cycles
+ self.warmup_cosine_lr_warmup_epochs = warmup_cosine_lr_warmup_epochs
+ self.weight_decay = weight_decay
+
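A minimal sketch of how the `_attribute_map` above drives wire serialization: snake_case constructor arguments on the Python side are emitted under the camelCase REST keys declared in the map. It assumes the base `ImageModelSettings` model is exported from `azure.mgmt.machinelearningservices.models` and that the vendored `_serialization.Model` base exposes `serialize()`; verify both against the generated package.

    from azure.mgmt.machinelearningservices import models as ml_models

    # Build a settings object with python-style (snake_case) keyword arguments.
    settings = ml_models.ImageModelSettings(
        learning_rate=0.005,                     # serialized under "learningRate"
        learning_rate_scheduler="WarmupCosine",  # serialized under "learningRateScheduler"
        warmup_cosine_lr_warmup_epochs=2,        # serialized under "warmupCosineLRWarmupEpochs"
    )

    # serialize() (assumed helper on the shared _serialization.Model base) emits the
    # REST payload using the camelCase keys from _attribute_map above.
    print(settings.serialize())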
+
+class ImageModelSettingsClassification(ImageModelSettings): # pylint: disable=too-many-instance-attributes
+ """Settings used for training the model.
+ For more information on the available settings please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+
+ :ivar advanced_settings: Settings for advanced scenarios.
+ :vartype advanced_settings: str
+ :ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+ :vartype ams_gradient: bool
+ :ivar augmentations: Settings for using Augmentations.
+ :vartype augmentations: str
+ :ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+ [0, 1].
+ :vartype beta1: float
+ :ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+ [0, 1].
+ :vartype beta2: float
+ :ivar checkpoint_frequency: Frequency to store model checkpoints. Must be a positive integer.
+ :vartype checkpoint_frequency: int
+ :ivar checkpoint_model: The pretrained checkpoint model for incremental training.
+ :vartype checkpoint_model: ~azure.mgmt.machinelearningservices.models.MLFlowModelJobInput
+ :ivar checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
+ incremental training.
+ :vartype checkpoint_run_id: str
+ :ivar distributed: Whether to use distributed training.
+ :vartype distributed: bool
+ :ivar early_stopping: Enable early stopping logic during training.
+ :vartype early_stopping: bool
+ :ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
+ primary metric improvement
+ is tracked for early stopping. Must be a positive integer.
+ :vartype early_stopping_delay: int
+ :ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
+ primary metric improvement before
+ the run is stopped. Must be a positive integer.
+ :vartype early_stopping_patience: int
+ :ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
+ :vartype enable_onnx_normalization: bool
+ :ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
+ be a positive integer.
+ :vartype evaluation_frequency: int
+ :ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
+ "GradAccumulationStep" steps without
+ updating the model weights while accumulating the gradients of those steps, and then using
+ the accumulated gradients to compute the weight updates. Must be a positive integer.
+ :vartype gradient_accumulation_step: int
+ :ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
+ For instance, passing 2 as value for 'seresnext' means
+ freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
+ please
+ see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :vartype layers_to_freeze: int
+ :ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+ :vartype learning_rate: float
+ :ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+ 'step'. Known values are: "None", "WarmupCosine", and "Step".
+ :vartype learning_rate_scheduler: str or
+ ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
+ :ivar model_name: Name of the model to use for training.
+ For more information on the available models please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :vartype model_name: str
+ :ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
+ :vartype momentum: float
+ :ivar nesterov: Enable nesterov when optimizer is 'sgd'.
+ :vartype nesterov: bool
+ :ivar number_of_epochs: Number of training epochs. Must be a positive integer.
+ :vartype number_of_epochs: int
+ :ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
+ :vartype number_of_workers: int
+ :ivar optimizer: Type of optimizer. Known values are: "None", "Sgd", "Adam", and "Adamw".
+ :vartype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
+ :ivar random_seed: Random seed to be used when using deterministic training.
+ :vartype random_seed: int
+ :ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
+ the range [0, 1].
+ :vartype step_lr_gamma: float
+ :ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
+ positive integer.
+ :vartype step_lr_step_size: int
+ :ivar training_batch_size: Training batch size. Must be a positive integer.
+ :vartype training_batch_size: int
+ :ivar validation_batch_size: Validation batch size. Must be a positive integer.
+ :vartype validation_batch_size: int
+ :ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+ 'warmup_cosine'. Must be a float in the range [0, 1].
+ :vartype warmup_cosine_lr_cycles: float
+ :ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+ 'warmup_cosine'. Must be a positive integer.
+ :vartype warmup_cosine_lr_warmup_epochs: int
+ :ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
+ a float in the range [0, 1].
+ :vartype weight_decay: float
+ :ivar training_crop_size: Image crop size that is input to the neural network for the training
+ dataset. Must be a positive integer.
+ :vartype training_crop_size: int
+ :ivar validation_crop_size: Image crop size that is input to the neural network for the
+ validation dataset. Must be a positive integer.
+ :vartype validation_crop_size: int
+ :ivar validation_resize_size: Image size to which to resize before cropping for validation
+ dataset. Must be a positive integer.
+ :vartype validation_resize_size: int
+ :ivar weighted_loss: Weighted loss. The accepted values are 0 for no weighted loss,
+ 1 for weighted loss with sqrt(class_weights), and 2 for weighted loss with class_weights. Must be
+ 0, 1, or 2.
+ :vartype weighted_loss: int
+ """
+
+ _attribute_map = {
+ "advanced_settings": {"key": "advancedSettings", "type": "str"},
+ "ams_gradient": {"key": "amsGradient", "type": "bool"},
+ "augmentations": {"key": "augmentations", "type": "str"},
+ "beta1": {"key": "beta1", "type": "float"},
+ "beta2": {"key": "beta2", "type": "float"},
+ "checkpoint_frequency": {"key": "checkpointFrequency", "type": "int"},
+ "checkpoint_model": {"key": "checkpointModel", "type": "MLFlowModelJobInput"},
+ "checkpoint_run_id": {"key": "checkpointRunId", "type": "str"},
+ "distributed": {"key": "distributed", "type": "bool"},
+ "early_stopping": {"key": "earlyStopping", "type": "bool"},
+ "early_stopping_delay": {"key": "earlyStoppingDelay", "type": "int"},
+ "early_stopping_patience": {"key": "earlyStoppingPatience", "type": "int"},
+ "enable_onnx_normalization": {"key": "enableOnnxNormalization", "type": "bool"},
+ "evaluation_frequency": {"key": "evaluationFrequency", "type": "int"},
+ "gradient_accumulation_step": {"key": "gradientAccumulationStep", "type": "int"},
+ "layers_to_freeze": {"key": "layersToFreeze", "type": "int"},
+ "learning_rate": {"key": "learningRate", "type": "float"},
+ "learning_rate_scheduler": {"key": "learningRateScheduler", "type": "str"},
+ "model_name": {"key": "modelName", "type": "str"},
+ "momentum": {"key": "momentum", "type": "float"},
+ "nesterov": {"key": "nesterov", "type": "bool"},
+ "number_of_epochs": {"key": "numberOfEpochs", "type": "int"},
+ "number_of_workers": {"key": "numberOfWorkers", "type": "int"},
+ "optimizer": {"key": "optimizer", "type": "str"},
+ "random_seed": {"key": "randomSeed", "type": "int"},
+ "step_lr_gamma": {"key": "stepLRGamma", "type": "float"},
+ "step_lr_step_size": {"key": "stepLRStepSize", "type": "int"},
+ "training_batch_size": {"key": "trainingBatchSize", "type": "int"},
+ "validation_batch_size": {"key": "validationBatchSize", "type": "int"},
+ "warmup_cosine_lr_cycles": {"key": "warmupCosineLRCycles", "type": "float"},
+ "warmup_cosine_lr_warmup_epochs": {"key": "warmupCosineLRWarmupEpochs", "type": "int"},
+ "weight_decay": {"key": "weightDecay", "type": "float"},
+ "training_crop_size": {"key": "trainingCropSize", "type": "int"},
+ "validation_crop_size": {"key": "validationCropSize", "type": "int"},
+ "validation_resize_size": {"key": "validationResizeSize", "type": "int"},
+ "weighted_loss": {"key": "weightedLoss", "type": "int"},
+ }
+
+ def __init__( # pylint: disable=too-many-locals
+ self,
+ *,
+ advanced_settings: Optional[str] = None,
+ ams_gradient: Optional[bool] = None,
+ augmentations: Optional[str] = None,
+ beta1: Optional[float] = None,
+ beta2: Optional[float] = None,
+ checkpoint_frequency: Optional[int] = None,
+ checkpoint_model: Optional["_models.MLFlowModelJobInput"] = None,
+ checkpoint_run_id: Optional[str] = None,
+ distributed: Optional[bool] = None,
+ early_stopping: Optional[bool] = None,
+ early_stopping_delay: Optional[int] = None,
+ early_stopping_patience: Optional[int] = None,
+ enable_onnx_normalization: Optional[bool] = None,
+ evaluation_frequency: Optional[int] = None,
+ gradient_accumulation_step: Optional[int] = None,
+ layers_to_freeze: Optional[int] = None,
+ learning_rate: Optional[float] = None,
+ learning_rate_scheduler: Optional[Union[str, "_models.LearningRateScheduler"]] = None,
+ model_name: Optional[str] = None,
+ momentum: Optional[float] = None,
+ nesterov: Optional[bool] = None,
+ number_of_epochs: Optional[int] = None,
+ number_of_workers: Optional[int] = None,
+ optimizer: Optional[Union[str, "_models.StochasticOptimizer"]] = None,
+ random_seed: Optional[int] = None,
+ step_lr_gamma: Optional[float] = None,
+ step_lr_step_size: Optional[int] = None,
+ training_batch_size: Optional[int] = None,
+ validation_batch_size: Optional[int] = None,
+ warmup_cosine_lr_cycles: Optional[float] = None,
+ warmup_cosine_lr_warmup_epochs: Optional[int] = None,
+ weight_decay: Optional[float] = None,
+ training_crop_size: Optional[int] = None,
+ validation_crop_size: Optional[int] = None,
+ validation_resize_size: Optional[int] = None,
+ weighted_loss: Optional[int] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword advanced_settings: Settings for advanced scenarios.
+ :paramtype advanced_settings: str
+ :keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+ :paramtype ams_gradient: bool
+ :keyword augmentations: Settings for using Augmentations.
+ :paramtype augmentations: str
+ :keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
+ range [0, 1].
+ :paramtype beta1: float
+ :keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
+ range [0, 1].
+ :paramtype beta2: float
+ :keyword checkpoint_frequency: Frequency to store model checkpoints. Must be a positive
+ integer.
+ :paramtype checkpoint_frequency: int
+ :keyword checkpoint_model: The pretrained checkpoint model for incremental training.
+ :paramtype checkpoint_model: ~azure.mgmt.machinelearningservices.models.MLFlowModelJobInput
+ :keyword checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
+ incremental training.
+ :paramtype checkpoint_run_id: str
+ :keyword distributed: Whether to use distributed training.
+ :paramtype distributed: bool
+ :keyword early_stopping: Enable early stopping logic during training.
+ :paramtype early_stopping: bool
+ :keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
+ before primary metric improvement
+ is tracked for early stopping. Must be a positive integer.
+ :paramtype early_stopping_delay: int
+ :keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
+ primary metric improvement before
+ the run is stopped. Must be a positive integer.
+ :paramtype early_stopping_patience: int
+ :keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
+ :paramtype enable_onnx_normalization: bool
+ :keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
+ Must be a positive integer.
+ :paramtype evaluation_frequency: int
+ :keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
+ "GradAccumulationStep" steps without
+ updating the model weights while accumulating the gradients of those steps, and then using
+ the accumulated gradients to compute the weight updates. Must be a positive integer.
+ :paramtype gradient_accumulation_step: int
+ :keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
+ integer.
+ For instance, passing 2 as value for 'seresnext' means
+ freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
+ please
+ see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :paramtype layers_to_freeze: int
+ :keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+ :paramtype learning_rate: float
+ :keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+ 'step'. Known values are: "None", "WarmupCosine", and "Step".
+ :paramtype learning_rate_scheduler: str or
+ ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
+ :keyword model_name: Name of the model to use for training.
+ For more information on the available models please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :paramtype model_name: str
+ :keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
+ 1].
+ :paramtype momentum: float
+ :keyword nesterov: Enable nesterov when optimizer is 'sgd'.
+ :paramtype nesterov: bool
+ :keyword number_of_epochs: Number of training epochs. Must be a positive integer.
+ :paramtype number_of_epochs: int
+ :keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
+ :paramtype number_of_workers: int
+ :keyword optimizer: Type of optimizer. Known values are: "None", "Sgd", "Adam", and "Adamw".
+ :paramtype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
+ :keyword random_seed: Random seed to be used when using deterministic training.
+ :paramtype random_seed: int
+ :keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
+ in the range [0, 1].
+ :paramtype step_lr_gamma: float
+ :keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
+ a positive integer.
+ :paramtype step_lr_step_size: int
+ :keyword training_batch_size: Training batch size. Must be a positive integer.
+ :paramtype training_batch_size: int
+ :keyword validation_batch_size: Validation batch size. Must be a positive integer.
+ :paramtype validation_batch_size: int
+ :keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+ 'warmup_cosine'. Must be a float in the range [0, 1].
+ :paramtype warmup_cosine_lr_cycles: float
+ :keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+ 'warmup_cosine'. Must be a positive integer.
+ :paramtype warmup_cosine_lr_warmup_epochs: int
+ :keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
+ be a float in the range [0, 1].
+ :paramtype weight_decay: float
+ :keyword training_crop_size: Image crop size that is input to the neural network for the
+ training dataset. Must be a positive integer.
+ :paramtype training_crop_size: int
+ :keyword validation_crop_size: Image crop size that is input to the neural network for the
+ validation dataset. Must be a positive integer.
+ :paramtype validation_crop_size: int
+ :keyword validation_resize_size: Image size to which to resize before cropping for validation
+ dataset. Must be a positive integer.
+ :paramtype validation_resize_size: int
+ :keyword weighted_loss: Weighted loss. The accepted values are 0 for no weighted loss,
+ 1 for weighted loss with sqrt(class_weights), and 2 for weighted loss with class_weights. Must be
+ 0, 1, or 2.
+ :paramtype weighted_loss: int
+ """
+ super().__init__(
+ advanced_settings=advanced_settings,
+ ams_gradient=ams_gradient,
+ augmentations=augmentations,
+ beta1=beta1,
+ beta2=beta2,
+ checkpoint_frequency=checkpoint_frequency,
+ checkpoint_model=checkpoint_model,
+ checkpoint_run_id=checkpoint_run_id,
+ distributed=distributed,
+ early_stopping=early_stopping,
+ early_stopping_delay=early_stopping_delay,
+ early_stopping_patience=early_stopping_patience,
+ enable_onnx_normalization=enable_onnx_normalization,
+ evaluation_frequency=evaluation_frequency,
+ gradient_accumulation_step=gradient_accumulation_step,
+ layers_to_freeze=layers_to_freeze,
+ learning_rate=learning_rate,
+ learning_rate_scheduler=learning_rate_scheduler,
+ model_name=model_name,
+ momentum=momentum,
+ nesterov=nesterov,
+ number_of_epochs=number_of_epochs,
+ number_of_workers=number_of_workers,
+ optimizer=optimizer,
+ random_seed=random_seed,
+ step_lr_gamma=step_lr_gamma,
+ step_lr_step_size=step_lr_step_size,
+ training_batch_size=training_batch_size,
+ validation_batch_size=validation_batch_size,
+ warmup_cosine_lr_cycles=warmup_cosine_lr_cycles,
+ warmup_cosine_lr_warmup_epochs=warmup_cosine_lr_warmup_epochs,
+ weight_decay=weight_decay,
+ **kwargs
+ )
+ self.training_crop_size = training_crop_size
+ self.validation_crop_size = validation_crop_size
+ self.validation_resize_size = validation_resize_size
+ self.weighted_loss = weighted_loss
+
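A hedged usage sketch for ImageModelSettingsClassification, using only parameters documented above; the hyperparameter values are illustrative, not recommendations.

    from azure.mgmt.machinelearningservices import models as ml_models

    classification_settings = ml_models.ImageModelSettingsClassification(
        model_name="seresnext",            # a model from the AutoML image model catalog
        number_of_epochs=15,
        learning_rate=0.01,                # float in the range [0, 1]
        learning_rate_scheduler="WarmupCosine",
        optimizer="Sgd",
        momentum=0.9,
        early_stopping=True,
        training_crop_size=224,            # classification-specific field
        validation_resize_size=256,        # classification-specific field
        weighted_loss=1,                   # 1 = weighted loss with sqrt(class_weights)
    )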
+
+class ImageModelSettingsObjectDetection(ImageModelSettings): # pylint: disable=too-many-instance-attributes
+ """Settings used for training the model.
+ For more information on the available settings please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+
+ :ivar advanced_settings: Settings for advanced scenarios.
+ :vartype advanced_settings: str
+ :ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+ :vartype ams_gradient: bool
+ :ivar augmentations: Settings for using Augmentations.
+ :vartype augmentations: str
+ :ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+ [0, 1].
+ :vartype beta1: float
+ :ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+ [0, 1].
+ :vartype beta2: float
+ :ivar checkpoint_frequency: Frequency to store model checkpoints. Must be a positive integer.
+ :vartype checkpoint_frequency: int
+ :ivar checkpoint_model: The pretrained checkpoint model for incremental training.
+ :vartype checkpoint_model: ~azure.mgmt.machinelearningservices.models.MLFlowModelJobInput
+ :ivar checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
+ incremental training.
+ :vartype checkpoint_run_id: str
+ :ivar distributed: Whether to use distributed training.
+ :vartype distributed: bool
+ :ivar early_stopping: Enable early stopping logic during training.
+ :vartype early_stopping: bool
+ :ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
+ primary metric improvement
+ is tracked for early stopping. Must be a positive integer.
+ :vartype early_stopping_delay: int
+ :ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
+ primary metric improvement before
+ the run is stopped. Must be a positive integer.
+ :vartype early_stopping_patience: int
+ :ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
+ :vartype enable_onnx_normalization: bool
+ :ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
+ be a positive integer.
+ :vartype evaluation_frequency: int
+ :ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
+ "GradAccumulationStep" steps without
+ updating the model weights while accumulating the gradients of those steps, and then using
+ the accumulated gradients to compute the weight updates. Must be a positive integer.
+ :vartype gradient_accumulation_step: int
+ :ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
+ For instance, passing 2 as value for 'seresnext' means
+ freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
+ please
+ see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :vartype layers_to_freeze: int
+ :ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+ :vartype learning_rate: float
+ :ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+ 'step'. Known values are: "None", "WarmupCosine", and "Step".
+ :vartype learning_rate_scheduler: str or
+ ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
+ :ivar model_name: Name of the model to use for training.
+ For more information on the available models please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :vartype model_name: str
+ :ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
+ :vartype momentum: float
+ :ivar nesterov: Enable nesterov when optimizer is 'sgd'.
+ :vartype nesterov: bool
+ :ivar number_of_epochs: Number of training epochs. Must be a positive integer.
+ :vartype number_of_epochs: int
+ :ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
+ :vartype number_of_workers: int
+ :ivar optimizer: Type of optimizer. Known values are: "None", "Sgd", "Adam", and "Adamw".
+ :vartype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
+ :ivar random_seed: Random seed to be used when using deterministic training.
+ :vartype random_seed: int
+ :ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
+ the range [0, 1].
+ :vartype step_lr_gamma: float
+ :ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
+ positive integer.
+ :vartype step_lr_step_size: int
+ :ivar training_batch_size: Training batch size. Must be a positive integer.
+ :vartype training_batch_size: int
+ :ivar validation_batch_size: Validation batch size. Must be a positive integer.
+ :vartype validation_batch_size: int
+ :ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+ 'warmup_cosine'. Must be a float in the range [0, 1].
+ :vartype warmup_cosine_lr_cycles: float
+ :ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+ 'warmup_cosine'. Must be a positive integer.
+ :vartype warmup_cosine_lr_warmup_epochs: int
+ :ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
+ a float in the range [0, 1].
+ :vartype weight_decay: float
+ :ivar box_detections_per_image: Maximum number of detections per image, for all classes. Must
+ be a positive integer.
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :vartype box_detections_per_image: int
+ :ivar box_score_threshold: During inference, only return proposals with a classification score
+ greater than
+ BoxScoreThreshold. Must be a float in the range [0, 1].
+ :vartype box_score_threshold: float
+ :ivar image_size: Image size for train and validation. Must be a positive integer.
+ Note: The training run may get into CUDA OOM if the size is too big.
+ Note: This setting is only supported for the 'yolov5' algorithm.
+ :vartype image_size: int
+ :ivar log_training_metrics: Enable computing and logging training metrics. Known values are:
+ "Enable" and "Disable".
+ :vartype log_training_metrics: str or
+ ~azure.mgmt.machinelearningservices.models.LogTrainingMetrics
+ :ivar log_validation_loss: Enable computing and logging validation loss. Known values are:
+ "Enable" and "Disable".
+ :vartype log_validation_loss: str or
+ ~azure.mgmt.machinelearningservices.models.LogValidationLoss
+ :ivar max_size: Maximum size of the image to be rescaled before feeding it to the backbone.
+ Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :vartype max_size: int
+ :ivar min_size: Minimum size of the image to be rescaled before feeding it to the backbone.
+ Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :vartype min_size: int
+ :ivar model_size: Model size. Must be 'small', 'medium', 'large', or 'xlarge'.
+ Note: training run may get into CUDA OOM if the model size is too big.
+ Note: This setting is only supported for the 'yolov5' algorithm. Known values are: "None",
+ "Small", "Medium", "Large", and "ExtraLarge".
+ :vartype model_size: str or ~azure.mgmt.machinelearningservices.models.ModelSize
+ :ivar multi_scale: Enable multi-scale image by varying image size by +/- 50%.
+ Note: training run may get into CUDA OOM if there is insufficient GPU memory.
+ Note: This setting is only supported for the 'yolov5' algorithm.
+ :vartype multi_scale: bool
+ :ivar nms_iou_threshold: IOU threshold used during inference in NMS post processing. Must be a
+ float in the range [0, 1].
+ :vartype nms_iou_threshold: float
+ :ivar tile_grid_size: The grid size to use for tiling each image. Note: TileGridSize must not
+ be
+ None to enable small object detection logic. A string containing two integers in mxn format.
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :vartype tile_grid_size: str
+ :ivar tile_overlap_ratio: Overlap ratio between adjacent tiles in each dimension. Must be a
+ float in the range [0, 1).
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :vartype tile_overlap_ratio: float
+ :ivar tile_predictions_nms_threshold: The IOU threshold to use to perform NMS while merging
+ predictions from tiles and image.
+ Used in validation/inference. Must be a float in the range [0, 1].
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :vartype tile_predictions_nms_threshold: float
+ :ivar validation_iou_threshold: IOU threshold to use when computing validation metric. Must be
+ a float in the range [0, 1].
+ :vartype validation_iou_threshold: float
+ :ivar validation_metric_type: Metric computation method to use for validation metrics. Known
+ values are: "None", "Coco", "Voc", and "CocoVoc".
+ :vartype validation_metric_type: str or
+ ~azure.mgmt.machinelearningservices.models.ValidationMetricType
+ """
+
+ _attribute_map = {
+ "advanced_settings": {"key": "advancedSettings", "type": "str"},
+ "ams_gradient": {"key": "amsGradient", "type": "bool"},
+ "augmentations": {"key": "augmentations", "type": "str"},
+ "beta1": {"key": "beta1", "type": "float"},
+ "beta2": {"key": "beta2", "type": "float"},
+ "checkpoint_frequency": {"key": "checkpointFrequency", "type": "int"},
+ "checkpoint_model": {"key": "checkpointModel", "type": "MLFlowModelJobInput"},
+ "checkpoint_run_id": {"key": "checkpointRunId", "type": "str"},
+ "distributed": {"key": "distributed", "type": "bool"},
+ "early_stopping": {"key": "earlyStopping", "type": "bool"},
+ "early_stopping_delay": {"key": "earlyStoppingDelay", "type": "int"},
+ "early_stopping_patience": {"key": "earlyStoppingPatience", "type": "int"},
+ "enable_onnx_normalization": {"key": "enableOnnxNormalization", "type": "bool"},
+ "evaluation_frequency": {"key": "evaluationFrequency", "type": "int"},
+ "gradient_accumulation_step": {"key": "gradientAccumulationStep", "type": "int"},
+ "layers_to_freeze": {"key": "layersToFreeze", "type": "int"},
+ "learning_rate": {"key": "learningRate", "type": "float"},
+ "learning_rate_scheduler": {"key": "learningRateScheduler", "type": "str"},
+ "model_name": {"key": "modelName", "type": "str"},
+ "momentum": {"key": "momentum", "type": "float"},
+ "nesterov": {"key": "nesterov", "type": "bool"},
+ "number_of_epochs": {"key": "numberOfEpochs", "type": "int"},
+ "number_of_workers": {"key": "numberOfWorkers", "type": "int"},
+ "optimizer": {"key": "optimizer", "type": "str"},
+ "random_seed": {"key": "randomSeed", "type": "int"},
+ "step_lr_gamma": {"key": "stepLRGamma", "type": "float"},
+ "step_lr_step_size": {"key": "stepLRStepSize", "type": "int"},
+ "training_batch_size": {"key": "trainingBatchSize", "type": "int"},
+ "validation_batch_size": {"key": "validationBatchSize", "type": "int"},
+ "warmup_cosine_lr_cycles": {"key": "warmupCosineLRCycles", "type": "float"},
+ "warmup_cosine_lr_warmup_epochs": {"key": "warmupCosineLRWarmupEpochs", "type": "int"},
+ "weight_decay": {"key": "weightDecay", "type": "float"},
+ "box_detections_per_image": {"key": "boxDetectionsPerImage", "type": "int"},
+ "box_score_threshold": {"key": "boxScoreThreshold", "type": "float"},
+ "image_size": {"key": "imageSize", "type": "int"},
+ "log_training_metrics": {"key": "logTrainingMetrics", "type": "str"},
+ "log_validation_loss": {"key": "logValidationLoss", "type": "str"},
+ "max_size": {"key": "maxSize", "type": "int"},
+ "min_size": {"key": "minSize", "type": "int"},
+ "model_size": {"key": "modelSize", "type": "str"},
+ "multi_scale": {"key": "multiScale", "type": "bool"},
+ "nms_iou_threshold": {"key": "nmsIouThreshold", "type": "float"},
+ "tile_grid_size": {"key": "tileGridSize", "type": "str"},
+ "tile_overlap_ratio": {"key": "tileOverlapRatio", "type": "float"},
+ "tile_predictions_nms_threshold": {"key": "tilePredictionsNmsThreshold", "type": "float"},
+ "validation_iou_threshold": {"key": "validationIouThreshold", "type": "float"},
+ "validation_metric_type": {"key": "validationMetricType", "type": "str"},
+ }
+
+ def __init__( # pylint: disable=too-many-locals
+ self,
+ *,
+ advanced_settings: Optional[str] = None,
+ ams_gradient: Optional[bool] = None,
+ augmentations: Optional[str] = None,
+ beta1: Optional[float] = None,
+ beta2: Optional[float] = None,
+ checkpoint_frequency: Optional[int] = None,
+ checkpoint_model: Optional["_models.MLFlowModelJobInput"] = None,
+ checkpoint_run_id: Optional[str] = None,
+ distributed: Optional[bool] = None,
+ early_stopping: Optional[bool] = None,
+ early_stopping_delay: Optional[int] = None,
+ early_stopping_patience: Optional[int] = None,
+ enable_onnx_normalization: Optional[bool] = None,
+ evaluation_frequency: Optional[int] = None,
+ gradient_accumulation_step: Optional[int] = None,
+ layers_to_freeze: Optional[int] = None,
+ learning_rate: Optional[float] = None,
+ learning_rate_scheduler: Optional[Union[str, "_models.LearningRateScheduler"]] = None,
+ model_name: Optional[str] = None,
+ momentum: Optional[float] = None,
+ nesterov: Optional[bool] = None,
+ number_of_epochs: Optional[int] = None,
+ number_of_workers: Optional[int] = None,
+ optimizer: Optional[Union[str, "_models.StochasticOptimizer"]] = None,
+ random_seed: Optional[int] = None,
+ step_lr_gamma: Optional[float] = None,
+ step_lr_step_size: Optional[int] = None,
+ training_batch_size: Optional[int] = None,
+ validation_batch_size: Optional[int] = None,
+ warmup_cosine_lr_cycles: Optional[float] = None,
+ warmup_cosine_lr_warmup_epochs: Optional[int] = None,
+ weight_decay: Optional[float] = None,
+ box_detections_per_image: Optional[int] = None,
+ box_score_threshold: Optional[float] = None,
+ image_size: Optional[int] = None,
+ log_training_metrics: Optional[Union[str, "_models.LogTrainingMetrics"]] = None,
+ log_validation_loss: Optional[Union[str, "_models.LogValidationLoss"]] = None,
+ max_size: Optional[int] = None,
+ min_size: Optional[int] = None,
+ model_size: Optional[Union[str, "_models.ModelSize"]] = None,
+ multi_scale: Optional[bool] = None,
+ nms_iou_threshold: Optional[float] = None,
+ tile_grid_size: Optional[str] = None,
+ tile_overlap_ratio: Optional[float] = None,
+ tile_predictions_nms_threshold: Optional[float] = None,
+ validation_iou_threshold: Optional[float] = None,
+ validation_metric_type: Optional[Union[str, "_models.ValidationMetricType"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword advanced_settings: Settings for advanced scenarios.
+ :paramtype advanced_settings: str
+ :keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+ :paramtype ams_gradient: bool
+ :keyword augmentations: Settings for using Augmentations.
+ :paramtype augmentations: str
+ :keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
+ range [0, 1].
+ :paramtype beta1: float
+ :keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
+ range [0, 1].
+ :paramtype beta2: float
+ :keyword checkpoint_frequency: Frequency to store model checkpoints. Must be a positive
+ integer.
+ :paramtype checkpoint_frequency: int
+ :keyword checkpoint_model: The pretrained checkpoint model for incremental training.
+ :paramtype checkpoint_model: ~azure.mgmt.machinelearningservices.models.MLFlowModelJobInput
+ :keyword checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
+ incremental training.
+ :paramtype checkpoint_run_id: str
+ :keyword distributed: Whether to use distributed training.
+ :paramtype distributed: bool
+ :keyword early_stopping: Enable early stopping logic during training.
+ :paramtype early_stopping: bool
+ :keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
+ before primary metric improvement
+ is tracked for early stopping. Must be a positive integer.
+ :paramtype early_stopping_delay: int
+ :keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
+ primary metric improvement before
+ the run is stopped. Must be a positive integer.
+ :paramtype early_stopping_patience: int
+ :keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
+ :paramtype enable_onnx_normalization: bool
:keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
Must be a positive integer.
- :paramtype evaluation_frequency: str
+ :paramtype evaluation_frequency: int
:keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
"GradAccumulationStep" steps without
updating the model weights while accumulating the gradients of those steps, and then using
the accumulated gradients to compute the weight updates. Must be a positive integer.
- :paramtype gradient_accumulation_step: str
+ :paramtype gradient_accumulation_step: int
:keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
integer.
For instance, passing 2 as value for 'seresnext' means
freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
please
see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :paramtype layers_to_freeze: str
+ :paramtype layers_to_freeze: int
:keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
- :paramtype learning_rate: str
+ :paramtype learning_rate: float
:keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
- 'step'.
- :paramtype learning_rate_scheduler: str
+ 'step'. Known values are: "None", "WarmupCosine", and "Step".
+ :paramtype learning_rate_scheduler: str or
+ ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
:keyword model_name: Name of the model to use for training.
For more information on the available models please visit the official documentation:
https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:paramtype model_name: str
:keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
1].
- :paramtype momentum: str
+ :paramtype momentum: float
:keyword nesterov: Enable nesterov when optimizer is 'sgd'.
- :paramtype nesterov: str
+ :paramtype nesterov: bool
:keyword number_of_epochs: Number of training epochs. Must be a positive integer.
- :paramtype number_of_epochs: str
+ :paramtype number_of_epochs: int
:keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
- :paramtype number_of_workers: str
- :keyword optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
- :paramtype optimizer: str
+ :paramtype number_of_workers: int
+ :keyword optimizer: Type of optimizer. Known values are: "None", "Sgd", "Adam", and "Adamw".
+ :paramtype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
:keyword random_seed: Random seed to be used when using deterministic training.
- :paramtype random_seed: str
+ :paramtype random_seed: int
:keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
in the range [0, 1].
- :paramtype step_lr_gamma: str
+ :paramtype step_lr_gamma: float
:keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
a positive integer.
- :paramtype step_lr_step_size: str
+ :paramtype step_lr_step_size: int
:keyword training_batch_size: Training batch size. Must be a positive integer.
- :paramtype training_batch_size: str
+ :paramtype training_batch_size: int
:keyword validation_batch_size: Validation batch size. Must be a positive integer.
- :paramtype validation_batch_size: str
+ :paramtype validation_batch_size: int
:keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
'warmup_cosine'. Must be a float in the range [0, 1].
- :paramtype warmup_cosine_lr_cycles: str
+ :paramtype warmup_cosine_lr_cycles: float
:keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
'warmup_cosine'. Must be a positive integer.
- :paramtype warmup_cosine_lr_warmup_epochs: str
+ :paramtype warmup_cosine_lr_warmup_epochs: int
:keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
be a float in the range [0, 1].
- :paramtype weight_decay: str
+ :paramtype weight_decay: float
:keyword box_detections_per_image: Maximum number of detections per image, for all classes.
Must be a positive integer.
Note: This setting is not supported for the 'yolov5' algorithm.
- :paramtype box_detections_per_image: str
+ :paramtype box_detections_per_image: int
:keyword box_score_threshold: During inference, only return proposals with a classification
score greater than
BoxScoreThreshold. Must be a float in the range [0, 1].
- :paramtype box_score_threshold: str
+ :paramtype box_score_threshold: float
:keyword image_size: Image size for train and validation. Must be a positive integer.
Note: The training run may get into CUDA OOM if the size is too big.
Note: This setting is only supported for the 'yolov5' algorithm.
- :paramtype image_size: str
+ :paramtype image_size: int
+ :keyword log_training_metrics: Enable computing and logging training metrics. Known values are:
+ "Enable" and "Disable".
+ :paramtype log_training_metrics: str or
+ ~azure.mgmt.machinelearningservices.models.LogTrainingMetrics
+ :keyword log_validation_loss: Enable computing and logging validation loss. Known values are:
+ "Enable" and "Disable".
+ :paramtype log_validation_loss: str or
+ ~azure.mgmt.machinelearningservices.models.LogValidationLoss
:keyword max_size: Maximum size of the image to be rescaled before feeding it to the backbone.
Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
Note: This setting is not supported for the 'yolov5' algorithm.
- :paramtype max_size: str
+ :paramtype max_size: int
:keyword min_size: Minimum size of the image to be rescaled before feeding it to the backbone.
Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
Note: This setting is not supported for the 'yolov5' algorithm.
- :paramtype min_size: str
+ :paramtype min_size: int
:keyword model_size: Model size. Must be 'small', 'medium', 'large', or 'xlarge'.
Note: training run may get into CUDA OOM if the model size is too big.
- Note: This settings is only supported for the 'yolov5' algorithm.
- :paramtype model_size: str
+ Note: This setting is only supported for the 'yolov5' algorithm. Known values are: "None",
+ "Small", "Medium", "Large", and "ExtraLarge".
+ :paramtype model_size: str or ~azure.mgmt.machinelearningservices.models.ModelSize
:keyword multi_scale: Enable multi-scale image by varying image size by +/- 50%.
Note: training run may get into CUDA OOM if there is insufficient GPU memory.
Note: This setting is only supported for the 'yolov5' algorithm.
- :paramtype multi_scale: str
+ :paramtype multi_scale: bool
:keyword nms_iou_threshold: IOU threshold used during inference in NMS post processing. Must be
- float in the range [0, 1].
- :paramtype nms_iou_threshold: str
+ a float in the range [0, 1].
+ :paramtype nms_iou_threshold: float
:keyword tile_grid_size: The grid size to use for tiling each image. Note: TileGridSize must
not be
None to enable small object detection logic. A string containing two integers in mxn format.
@@ -11882,3154 +18019,5946 @@ def __init__( # pylint: disable=too-many-locals
:keyword tile_overlap_ratio: Overlap ratio between adjacent tiles in each dimension. Must be
a float in the range [0, 1).
Note: This setting is not supported for the 'yolov5' algorithm.
- :paramtype tile_overlap_ratio: str
+ :paramtype tile_overlap_ratio: float
:keyword tile_predictions_nms_threshold: The IOU threshold to use to perform NMS while merging
predictions from tiles and image.
Used in validation/inference. Must be a float in the range [0, 1].
Note: This setting is not supported for the 'yolov5' algorithm.
- NMS: Non-maximum suppression.
- :paramtype tile_predictions_nms_threshold: str
+ :paramtype tile_predictions_nms_threshold: float
:keyword validation_iou_threshold: IOU threshold to use when computing validation metric. Must
be a float in the range [0, 1].
- :paramtype validation_iou_threshold: str
- :keyword validation_metric_type: Metric computation method to use for validation metrics. Must
- be 'none', 'coco', 'voc', or 'coco_voc'.
- :paramtype validation_metric_type: str
+ :paramtype validation_iou_threshold: float
+ :keyword validation_metric_type: Metric computation method to use for validation metrics. Known
+ values are: "None", "Coco", "Voc", and "CocoVoc".
+ :paramtype validation_metric_type: str or
+ ~azure.mgmt.machinelearningservices.models.ValidationMetricType
+ """
+ super().__init__(
+ advanced_settings=advanced_settings,
+ ams_gradient=ams_gradient,
+ augmentations=augmentations,
+ beta1=beta1,
+ beta2=beta2,
+ checkpoint_frequency=checkpoint_frequency,
+ checkpoint_model=checkpoint_model,
+ checkpoint_run_id=checkpoint_run_id,
+ distributed=distributed,
+ early_stopping=early_stopping,
+ early_stopping_delay=early_stopping_delay,
+ early_stopping_patience=early_stopping_patience,
+ enable_onnx_normalization=enable_onnx_normalization,
+ evaluation_frequency=evaluation_frequency,
+ gradient_accumulation_step=gradient_accumulation_step,
+ layers_to_freeze=layers_to_freeze,
+ learning_rate=learning_rate,
+ learning_rate_scheduler=learning_rate_scheduler,
+ model_name=model_name,
+ momentum=momentum,
+ nesterov=nesterov,
+ number_of_epochs=number_of_epochs,
+ number_of_workers=number_of_workers,
+ optimizer=optimizer,
+ random_seed=random_seed,
+ step_lr_gamma=step_lr_gamma,
+ step_lr_step_size=step_lr_step_size,
+ training_batch_size=training_batch_size,
+ validation_batch_size=validation_batch_size,
+ warmup_cosine_lr_cycles=warmup_cosine_lr_cycles,
+ warmup_cosine_lr_warmup_epochs=warmup_cosine_lr_warmup_epochs,
+ weight_decay=weight_decay,
+ **kwargs
+ )
+ self.box_detections_per_image = box_detections_per_image
+ self.box_score_threshold = box_score_threshold
+ self.image_size = image_size
+ self.log_training_metrics = log_training_metrics
+ self.log_validation_loss = log_validation_loss
+ self.max_size = max_size
+ self.min_size = min_size
+ self.model_size = model_size
+ self.multi_scale = multi_scale
+ self.nms_iou_threshold = nms_iou_threshold
+ self.tile_grid_size = tile_grid_size
+ self.tile_overlap_ratio = tile_overlap_ratio
+ self.tile_predictions_nms_threshold = tile_predictions_nms_threshold
+ self.validation_iou_threshold = validation_iou_threshold
+ self.validation_metric_type = validation_metric_type
+
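A hedged sketch for ImageModelSettingsObjectDetection, exercising the detection-specific fields declared above (including the newly added log_training_metrics and log_validation_loss); the values are illustrative only.

    from azure.mgmt.machinelearningservices import models as ml_models

    detection_settings = ml_models.ImageModelSettingsObjectDetection(
        model_name="yolov5",
        model_size="Small",              # only honored by the 'yolov5' algorithm
        image_size=640,                  # only honored by the 'yolov5' algorithm
        nms_iou_threshold=0.5,           # float in the range [0, 1]
        validation_metric_type="Coco",
        log_training_metrics="Enable",   # added in this API version
        log_validation_loss="Enable",    # added in this API version
    )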
+
+class ImageObjectDetection(ImageObjectDetectionBase, AutoMLVertical): # pylint: disable=too-many-instance-attributes
+ """Image Object Detection. Object detection is used to identify objects in an image and locate
+ each object with a
+ bounding box e.g. locate all dogs and cats in an image and draw a bounding box around each.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
+ "Warning", "Error", and "Critical".
+ :vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
+ :ivar target_column_name: Target column name: This is the prediction values column.
+ Also known as the label column name in the context of classification tasks.
+ :vartype target_column_name: str
+ :ivar task_type: [Required] Task type for AutoMLJob. Required. Known values are:
+ "Classification", "Regression", "Forecasting", "ImageClassification",
+ "ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
+ "TextClassification", "TextClassificationMultilabel", and "TextNER".
+ :vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
+ :ivar training_data: [Required] Training data input. Required.
+ :vartype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :ivar validation_data: Validation data inputs.
+ :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar validation_data_size: The fraction of the training dataset that needs to be set aside
+ for validation purposes.
+ Values between (0.0, 1.0).
+ Applied when the validation dataset is not provided.
+ :vartype validation_data_size: float
+ :ivar model_settings: Settings used for training the model.
+ :vartype model_settings:
+ ~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
+ :ivar search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :vartype search_space:
+ list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
+ :ivar primary_metric: Primary metric to optimize for this task. "MeanAveragePrecision"
+ :vartype primary_metric: str or
+ ~azure.mgmt.machinelearningservices.models.ObjectDetectionPrimaryMetrics
+ """
+
+ _validation = {
+ "task_type": {"required": True},
+ "training_data": {"required": True},
+ "limit_settings": {"required": True},
+ }
+
+ _attribute_map = {
+ "log_verbosity": {"key": "logVerbosity", "type": "str"},
+ "target_column_name": {"key": "targetColumnName", "type": "str"},
+ "task_type": {"key": "taskType", "type": "str"},
+ "training_data": {"key": "trainingData", "type": "MLTableJobInput"},
+ "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
+ "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
+ "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
+ "validation_data_size": {"key": "validationDataSize", "type": "float"},
+ "model_settings": {"key": "modelSettings", "type": "ImageModelSettingsObjectDetection"},
+ "search_space": {"key": "searchSpace", "type": "[ImageModelDistributionSettingsObjectDetection]"},
+ "primary_metric": {"key": "primaryMetric", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ training_data: "_models.MLTableJobInput",
+ limit_settings: "_models.ImageLimitSettings",
+ log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
+ target_column_name: Optional[str] = None,
+ sweep_settings: Optional["_models.ImageSweepSettings"] = None,
+ validation_data: Optional["_models.MLTableJobInput"] = None,
+ validation_data_size: Optional[float] = None,
+ model_settings: Optional["_models.ImageModelSettingsObjectDetection"] = None,
+ search_space: Optional[List["_models.ImageModelDistributionSettingsObjectDetection"]] = None,
+ primary_metric: Optional[Union[str, "_models.ObjectDetectionPrimaryMetrics"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
+ "Warning", "Error", and "Critical".
+ :paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
+ :keyword target_column_name: Target column name: This is the prediction values column.
+ Also known as the label column name in the context of classification tasks.
+ :paramtype target_column_name: str
+ :keyword training_data: [Required] Training data input. Required.
+ :paramtype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :keyword validation_data: Validation data inputs.
+ :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword validation_data_size: The fraction of the training dataset that needs to be set
+ aside for validation purposes.
+ Values between (0.0, 1.0).
+ Applied when the validation dataset is not provided.
+ :paramtype validation_data_size: float
+ :keyword model_settings: Settings used for training the model.
+ :paramtype model_settings:
+ ~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
+ :keyword search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :paramtype search_space:
+ list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
+ :keyword primary_metric: Primary metric to optimize for this task. "MeanAveragePrecision"
+ :paramtype primary_metric: str or
+ ~azure.mgmt.machinelearningservices.models.ObjectDetectionPrimaryMetrics
+ """
+ super().__init__(
+ limit_settings=limit_settings,
+ sweep_settings=sweep_settings,
+ validation_data=validation_data,
+ validation_data_size=validation_data_size,
+ model_settings=model_settings,
+ search_space=search_space,
+ log_verbosity=log_verbosity,
+ target_column_name=target_column_name,
+ training_data=training_data,
+ **kwargs
+ )
+ self.log_verbosity = log_verbosity
+ self.target_column_name = target_column_name
+ self.task_type: str = "ImageObjectDetection"
+ self.training_data = training_data
+ self.primary_metric = primary_metric
+ self.limit_settings = limit_settings
+ self.sweep_settings = sweep_settings
+ self.validation_data = validation_data
+ self.validation_data_size = validation_data_size
+ self.model_settings = model_settings
+ self.search_space = search_space
+
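For orientation, a minimal usage sketch of the generated ImageObjectDetection task model follows; the MLTableJobInput `uri` keyword and the default-constructed ImageLimitSettings are assumptions for illustration and are not taken from this diff.

```python
from azure.mgmt.machinelearningservices import models

# Hedged sketch: only training_data and limit_settings are required by the model.
task = models.ImageObjectDetection(
    training_data=models.MLTableJobInput(
        uri="azureml://datastores/workspaceblobstore/paths/training-mltable/"  # assumed uri value
    ),
    limit_settings=models.ImageLimitSettings(),  # assumed to construct with defaults
    primary_metric="MeanAveragePrecision",       # the documented primary metric for this task
    validation_data_size=0.2,                    # hold out 20% of training data for validation
)
```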
+
+class ImageSweepSettings(_serialization.Model):
+ """Model sweeping and hyperparameter sweeping related settings.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar early_termination: Type of early termination policy.
+ :vartype early_termination: ~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicy
+ :ivar sampling_algorithm: [Required] Type of the hyperparameter sampling algorithms. Required.
+ Known values are: "Grid", "Random", and "Bayesian".
+ :vartype sampling_algorithm: str or
+ ~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
+ """
+
+ _validation = {
+ "sampling_algorithm": {"required": True},
+ }
+
+ _attribute_map = {
+ "early_termination": {"key": "earlyTermination", "type": "EarlyTerminationPolicy"},
+ "sampling_algorithm": {"key": "samplingAlgorithm", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ sampling_algorithm: Union[str, "_models.SamplingAlgorithmType"],
+ early_termination: Optional["_models.EarlyTerminationPolicy"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword early_termination: Type of early termination policy.
+ :paramtype early_termination: ~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicy
+ :keyword sampling_algorithm: [Required] Type of the hyperparameter sampling algorithms.
+ Required. Known values are: "Grid", "Random", and "Bayesian".
+ :paramtype sampling_algorithm: str or
+ ~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
+ """
+ super().__init__(**kwargs)
+ self.early_termination = early_termination
+ self.sampling_algorithm = sampling_algorithm
+
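A quick sketch for ImageSweepSettings, assuming nothing beyond what this diff defines: sampling_algorithm is the only required field and "Random" is one of the documented known values; early_termination is optional and omitted here.

```python
from azure.mgmt.machinelearningservices import models

# "Grid", "Random" and "Bayesian" are the documented SamplingAlgorithmType values.
sweep = models.ImageSweepSettings(sampling_algorithm="Random")
```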
+
+class ImportDataAction(ScheduleActionBase):
+ """ImportDataAction.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar action_type: [Required] Specifies the action type of the schedule. Required. Known values
+ are: "CreateJob", "InvokeBatchEndpoint", "ImportData", and "CreateMonitor".
+ :vartype action_type: str or ~azure.mgmt.machinelearningservices.models.ScheduleActionType
+ :ivar data_import_definition: [Required] Defines Schedule action definition details. Required.
+ :vartype data_import_definition: ~azure.mgmt.machinelearningservices.models.DataImport
+ """
+
+ _validation = {
+ "action_type": {"required": True},
+ "data_import_definition": {"required": True},
+ }
+
+ _attribute_map = {
+ "action_type": {"key": "actionType", "type": "str"},
+ "data_import_definition": {"key": "dataImportDefinition", "type": "DataImport"},
+ }
+
+ def __init__(self, *, data_import_definition: "_models.DataImport", **kwargs: Any) -> None:
+ """
+ :keyword data_import_definition: [Required] Defines Schedule action definition details.
+ Required.
+ :paramtype data_import_definition: ~azure.mgmt.machinelearningservices.models.DataImport
+ """
+ super().__init__(**kwargs)
+ self.action_type: str = "ImportData"
+ self.data_import_definition = data_import_definition
+
+
+class IndexColumn(_serialization.Model):
+ """Dto object representing index column.
+
+ :ivar column_name: Specifies the column name.
+ :vartype column_name: str
+ :ivar data_type: Specifies the data type. Known values are: "String", "Integer", "Long",
+ "Float", "Double", "Binary", "Datetime", and "Boolean".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.FeatureDataType
+ """
+
+ _attribute_map = {
+ "column_name": {"key": "columnName", "type": "str"},
+ "data_type": {"key": "dataType", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ column_name: Optional[str] = None,
+ data_type: Optional[Union[str, "_models.FeatureDataType"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword column_name: Specifies the column name.
+ :paramtype column_name: str
+ :keyword data_type: Specifies the data type. Known values are: "String", "Integer", "Long",
+ "Float", "Double", "Binary", "Datetime", and "Boolean".
+ :paramtype data_type: str or ~azure.mgmt.machinelearningservices.models.FeatureDataType
+ """
+ super().__init__(**kwargs)
+ self.column_name = column_name
+ self.data_type = data_type
+
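A small sketch of IndexColumn; the column name is an illustrative assumption, while "Datetime" is one of the documented FeatureDataType values.

```python
from azure.mgmt.machinelearningservices import models

# Both fields are optional on the generated model.
timestamp_column = models.IndexColumn(column_name="timestamp", data_type="Datetime")
```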
+
+class InferenceContainerProperties(_serialization.Model):
+ """InferenceContainerProperties.
+
+ :ivar liveness_route: The route to check the liveness of the inference server container.
+ :vartype liveness_route: ~azure.mgmt.machinelearningservices.models.Route
+ :ivar readiness_route: The route to check the readiness of the inference server container.
+ :vartype readiness_route: ~azure.mgmt.machinelearningservices.models.Route
+ :ivar scoring_route: The port to send the scoring requests to, within the inference server
+ container.
+ :vartype scoring_route: ~azure.mgmt.machinelearningservices.models.Route
+ """
+
+ _attribute_map = {
+ "liveness_route": {"key": "livenessRoute", "type": "Route"},
+ "readiness_route": {"key": "readinessRoute", "type": "Route"},
+ "scoring_route": {"key": "scoringRoute", "type": "Route"},
+ }
+
+ def __init__(
+ self,
+ *,
+ liveness_route: Optional["_models.Route"] = None,
+ readiness_route: Optional["_models.Route"] = None,
+ scoring_route: Optional["_models.Route"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword liveness_route: The route to check the liveness of the inference server container.
+ :paramtype liveness_route: ~azure.mgmt.machinelearningservices.models.Route
+ :keyword readiness_route: The route to check the readiness of the inference server container.
+ :paramtype readiness_route: ~azure.mgmt.machinelearningservices.models.Route
+ :keyword scoring_route: The port to send the scoring requests to, within the inference server
+ container.
+ :paramtype scoring_route: ~azure.mgmt.machinelearningservices.models.Route
+ """
+ super().__init__(**kwargs)
+ self.liveness_route = liveness_route
+ self.readiness_route = readiness_route
+ self.scoring_route = scoring_route
+
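A hedged sketch of InferenceContainerProperties; it assumes the Route model (not shown in this section) accepts `path` and `port` keywords, and the route paths and port are illustrative.

```python
from azure.mgmt.machinelearningservices import models

# All three routes are optional on the generated model.
container_props = models.InferenceContainerProperties(
    liveness_route=models.Route(path="/live", port=8080),   # assumed Route signature
    readiness_route=models.Route(path="/ready", port=8080),
    scoring_route=models.Route(path="/score", port=8080),
)
```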
+
+class InstanceTypeSchema(_serialization.Model):
+ """Instance type schema.
+
+ :ivar node_selector: Node Selector.
+ :vartype node_selector: dict[str, str]
+ :ivar resources: Resource requests/limits for this instance type.
+ :vartype resources: ~azure.mgmt.machinelearningservices.models.InstanceTypeSchemaResources
+ """
+
+ _attribute_map = {
+ "node_selector": {"key": "nodeSelector", "type": "{str}"},
+ "resources": {"key": "resources", "type": "InstanceTypeSchemaResources"},
+ }
+
+ def __init__(
+ self,
+ *,
+ node_selector: Optional[Dict[str, str]] = None,
+ resources: Optional["_models.InstanceTypeSchemaResources"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword node_selector: Node Selector.
+ :paramtype node_selector: dict[str, str]
+ :keyword resources: Resource requests/limits for this instance type.
+ :paramtype resources: ~azure.mgmt.machinelearningservices.models.InstanceTypeSchemaResources
+ """
+ super().__init__(**kwargs)
+ self.node_selector = node_selector
+ self.resources = resources
+
+
+class InstanceTypeSchemaResources(_serialization.Model):
+ """Resource requests/limits for this instance type.
+
+ :ivar requests: Resource requests for this instance type.
+ :vartype requests: dict[str, str]
+ :ivar limits: Resource limits for this instance type.
+ :vartype limits: dict[str, str]
+ """
+
+ _attribute_map = {
+ "requests": {"key": "requests", "type": "{str}"},
+ "limits": {"key": "limits", "type": "{str}"},
+ }
+
+ def __init__(
+ self, *, requests: Optional[Dict[str, str]] = None, limits: Optional[Dict[str, str]] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword requests: Resource requests for this instance type.
+ :paramtype requests: dict[str, str]
+ :keyword limits: Resource limits for this instance type.
+ :paramtype limits: dict[str, str]
+ """
+ super().__init__(**kwargs)
+ self.requests = requests
+ self.limits = limits
+
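A minimal sketch combining InstanceTypeSchema and InstanceTypeSchemaResources; the node selector key and the request/limit values are illustrative assumptions in Kubernetes resource notation.

```python
from azure.mgmt.machinelearningservices import models

# requests and limits are plain str -> str maps.
small_cpu = models.InstanceTypeSchema(
    node_selector={"node.kubernetes.io/instance-type": "cpu"},  # assumed selector key
    resources=models.InstanceTypeSchemaResources(
        requests={"cpu": "1", "memory": "4Gi"},
        limits={"cpu": "2", "memory": "8Gi"},
    ),
)
```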
+
+class IntellectualProperty(_serialization.Model):
+ """Intellectual Property details for a resource.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar protection_level: Protection level of the Intellectual Property. Known values are: "All"
+ and "None".
+ :vartype protection_level: str or ~azure.mgmt.machinelearningservices.models.ProtectionLevel
+ :ivar publisher: [Required] Publisher of the Intellectual Property. Must be the same as
+ Registry publisher name. Required.
+ :vartype publisher: str
+ """
+
+ _validation = {
+ "publisher": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
+ _attribute_map = {
+ "protection_level": {"key": "protectionLevel", "type": "str"},
+ "publisher": {"key": "publisher", "type": "str"},
+ }
+
+ def __init__(
+ self, *, publisher: str, protection_level: Optional[Union[str, "_models.ProtectionLevel"]] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword protection_level: Protection level of the Intellectual Property. Known values are:
+ "All" and "None".
+ :paramtype protection_level: str or ~azure.mgmt.machinelearningservices.models.ProtectionLevel
+ :keyword publisher: [Required] Publisher of the Intellectual Property. Must be the same as
+ Registry publisher name. Required.
+ :paramtype publisher: str
+ """
+ super().__init__(**kwargs)
+ self.protection_level = protection_level
+ self.publisher = publisher
+
+
+class JobBase(ProxyResource):
+ """Azure Resource Manager resource envelope.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.JobBaseProperties
+ """
+
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "properties": {"required": True},
+ }
+
+ _attribute_map = {
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "properties": {"key": "properties", "type": "JobBaseProperties"},
+ }
+
+ def __init__(self, *, properties: "_models.JobBaseProperties", **kwargs: Any) -> None:
+ """
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.JobBaseProperties
+ """
+ super().__init__(**kwargs)
+ self.properties = properties
+
+
+class JobBaseResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of JobBase entities.
+
+ :ivar next_link: The link to the next page of JobBase objects. If null, there are no additional
+ pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type JobBase.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.JobBase]
+ """
+
+ _attribute_map = {
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[JobBase]"},
+ }
+
+ def __init__(
+ self, *, next_link: Optional[str] = None, value: Optional[List["_models.JobBase"]] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword next_link: The link to the next page of JobBase objects. If null, there are no
+ additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type JobBase.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.JobBase]
+ """
+ super().__init__(**kwargs)
+ self.next_link = next_link
+ self.value = value
+
+
+class JobResourceConfiguration(ResourceConfiguration):
+ """JobResourceConfiguration.
+
+ :ivar instance_count: Optional number of instances or nodes used by the compute target.
+ :vartype instance_count: int
+ :ivar instance_type: Optional type of VM used as supported by the compute target.
+ :vartype instance_type: str
+ :ivar locations: Locations where the job can run.
+ :vartype locations: list[str]
+ :ivar max_instance_count: Optional max allowed number of instances or nodes to be used by the
+ compute target.
+ For use with elastic training, currently supported by PyTorch distribution type only.
+ :vartype max_instance_count: int
+ :ivar properties: Additional properties bag.
+ :vartype properties: dict[str, JSON]
+ :ivar docker_args: Extra arguments to pass to the Docker run command. This would override any
+ parameters that have already been set by the system, or in this section. This parameter is only
+ supported for Azure ML compute types.
+ :vartype docker_args: str
+ :ivar shm_size: Size of the docker container's shared memory block. This should be in the
+ format of (number)(unit) where the number has to be greater than 0 and the unit can be one of
+ b(bytes), k(kilobytes), m(megabytes), or g(gigabytes).
+ :vartype shm_size: str
+ """
+
+ _validation = {
+ "shm_size": {"pattern": r"\d+[bBkKmMgG]"},
+ }
+
+ _attribute_map = {
+ "instance_count": {"key": "instanceCount", "type": "int"},
+ "instance_type": {"key": "instanceType", "type": "str"},
+ "locations": {"key": "locations", "type": "[str]"},
+ "max_instance_count": {"key": "maxInstanceCount", "type": "int"},
+ "properties": {"key": "properties", "type": "{object}"},
+ "docker_args": {"key": "dockerArgs", "type": "str"},
+ "shm_size": {"key": "shmSize", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ instance_count: int = 1,
+ instance_type: Optional[str] = None,
+ locations: Optional[List[str]] = None,
+ max_instance_count: Optional[int] = None,
+ properties: Optional[Dict[str, JSON]] = None,
+ docker_args: Optional[str] = None,
+ shm_size: str = "2g",
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword instance_count: Optional number of instances or nodes used by the compute target.
+ :paramtype instance_count: int
+ :keyword instance_type: Optional type of VM used as supported by the compute target.
+ :paramtype instance_type: str
+ :keyword locations: Locations where the job can run.
+ :paramtype locations: list[str]
+ :keyword max_instance_count: Optional max allowed number of instances or nodes to be used by
+ the compute target.
+ For use with elastic training, currently supported by PyTorch distribution type only.
+ :paramtype max_instance_count: int
+ :keyword properties: Additional properties bag.
+ :paramtype properties: dict[str, JSON]
+ :keyword docker_args: Extra arguments to pass to the Docker run command. This would override
+ any parameters that have already been set by the system, or in this section. This parameter is
+ only supported for Azure ML compute types.
+ :paramtype docker_args: str
+ :keyword shm_size: Size of the docker container's shared memory block. This should be in the
+ format of (number)(unit) where the number has to be greater than 0 and the unit can be one of
+ b(bytes), k(kilobytes), m(megabytes), or g(gigabytes).
+ :paramtype shm_size: str
+ """
+ super().__init__(
+ instance_count=instance_count,
+ instance_type=instance_type,
+ locations=locations,
+ max_instance_count=max_instance_count,
+ properties=properties,
+ **kwargs
+ )
+ self.docker_args = docker_args
+ self.shm_size = shm_size
+
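A usage sketch for JobResourceConfiguration; the VM size string is an assumption for illustration, and instance_count and shm_size otherwise default to 1 and "2g".

```python
from azure.mgmt.machinelearningservices import models

# shm_size must match the pattern \d+[bBkKmMgG] from the model's validation rules.
resources = models.JobResourceConfiguration(
    instance_count=2,
    instance_type="Standard_DS3_v2",  # assumed VM size name
    shm_size="8g",
    docker_args="--ipc=host",
)
```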
+
+class JobScheduleAction(ScheduleActionBase):
+ """JobScheduleAction.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar action_type: [Required] Specifies the action type of the schedule. Required. Known values
+ are: "CreateJob", "InvokeBatchEndpoint", "ImportData", and "CreateMonitor".
+ :vartype action_type: str or ~azure.mgmt.machinelearningservices.models.ScheduleActionType
+ :ivar job_definition: [Required] Defines Schedule action definition details. Required.
+ :vartype job_definition: ~azure.mgmt.machinelearningservices.models.JobBaseProperties
+ """
+
+ _validation = {
+ "action_type": {"required": True},
+ "job_definition": {"required": True},
+ }
+
+ _attribute_map = {
+ "action_type": {"key": "actionType", "type": "str"},
+ "job_definition": {"key": "jobDefinition", "type": "JobBaseProperties"},
+ }
+
+ def __init__(self, *, job_definition: "_models.JobBaseProperties", **kwargs: Any) -> None:
+ """
+ :keyword job_definition: [Required] Defines Schedule action definition details. Required.
+ :paramtype job_definition: ~azure.mgmt.machinelearningservices.models.JobBaseProperties
+ """
+ super().__init__(**kwargs)
+ self.action_type: str = "CreateJob"
+ self.job_definition = job_definition
+
+
+class JobService(_serialization.Model):
+ """Job endpoint definition.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar endpoint: Url for endpoint.
+ :vartype endpoint: str
+ :ivar error_message: Any error in the service.
+ :vartype error_message: str
+ :ivar job_service_type: Endpoint type.
+ :vartype job_service_type: str
+ :ivar nodes: Nodes on which the user would like to start the service.
+ If Nodes is not set or set to null, the service will only be started on the leader node.
+ :vartype nodes: ~azure.mgmt.machinelearningservices.models.Nodes
+ :ivar port: Port for endpoint set by user.
+ :vartype port: int
+ :ivar properties: Additional properties to set on the endpoint.
+ :vartype properties: dict[str, str]
+ :ivar status: Status of endpoint.
+ :vartype status: str
+ """
+
+ _validation = {
+ "error_message": {"readonly": True},
+ "status": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "endpoint": {"key": "endpoint", "type": "str"},
+ "error_message": {"key": "errorMessage", "type": "str"},
+ "job_service_type": {"key": "jobServiceType", "type": "str"},
+ "nodes": {"key": "nodes", "type": "Nodes"},
+ "port": {"key": "port", "type": "int"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "status": {"key": "status", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ endpoint: Optional[str] = None,
+ job_service_type: Optional[str] = None,
+ nodes: Optional["_models.Nodes"] = None,
+ port: Optional[int] = None,
+ properties: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword endpoint: Url for endpoint.
+ :paramtype endpoint: str
+ :keyword job_service_type: Endpoint type.
+ :paramtype job_service_type: str
+ :keyword nodes: Nodes on which the user would like to start the service.
+ If Nodes is not set or set to null, the service will only be started on the leader node.
+ :paramtype nodes: ~azure.mgmt.machinelearningservices.models.Nodes
+ :keyword port: Port for endpoint set by user.
+ :paramtype port: int
+ :keyword properties: Additional properties to set on the endpoint.
+ :paramtype properties: dict[str, str]
+ """
+ super().__init__(**kwargs)
+ self.endpoint = endpoint
+ self.error_message = None
+ self.job_service_type = job_service_type
+ self.nodes = nodes
+ self.port = port
+ self.properties = properties
+ self.status = None
+
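A small sketch of JobService; the service type and port values are illustrative assumptions, and error_message and status are read-only and populated by the service.

```python
from azure.mgmt.machinelearningservices import models

# All writable fields are optional on the generated model.
service = models.JobService(job_service_type="JupyterLab", port=8888)
```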
+
+class KerberosCredentials(_serialization.Model):
+ """KerberosCredentials.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar kerberos_kdc_address: [Required] IP Address or DNS HostName. Required.
+ :vartype kerberos_kdc_address: str
+ :ivar kerberos_principal: [Required] Kerberos Username. Required.
+ :vartype kerberos_principal: str
+ :ivar kerberos_realm: [Required] Domain over which a Kerberos authentication server has the
+ authority to authenticate a user, host or service. Required.
+ :vartype kerberos_realm: str
+ """
+
+ _validation = {
+ "kerberos_kdc_address": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "kerberos_principal": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "kerberos_realm": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
+ _attribute_map = {
+ "kerberos_kdc_address": {"key": "kerberosKdcAddress", "type": "str"},
+ "kerberos_principal": {"key": "kerberosPrincipal", "type": "str"},
+ "kerberos_realm": {"key": "kerberosRealm", "type": "str"},
+ }
+
+ def __init__(
+ self, *, kerberos_kdc_address: str, kerberos_principal: str, kerberos_realm: str, **kwargs: Any
+ ) -> None:
+ """
+ :keyword kerberos_kdc_address: [Required] IP Address or DNS HostName. Required.
+ :paramtype kerberos_kdc_address: str
+ :keyword kerberos_principal: [Required] Kerberos Username. Required.
+ :paramtype kerberos_principal: str
+ :keyword kerberos_realm: [Required] Domain over which a Kerberos authentication server has the
+ authority to authenticate a user, host or service. Required.
+ :paramtype kerberos_realm: str
+ """
+ super().__init__(**kwargs)
+ self.kerberos_kdc_address = kerberos_kdc_address
+ self.kerberos_principal = kerberos_principal
+ self.kerberos_realm = kerberos_realm
+
+
+class KerberosKeytabCredentials(KerberosCredentials, DatastoreCredentials):
+ """KerberosKeytabCredentials.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar credentials_type: [Required] Credential type used to authenticate with storage.
+ Required. Known values are: "AccountKey", "Certificate", "None", "Sas", "ServicePrincipal",
+ "KerberosKeytab", and "KerberosPassword".
+ :vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
+ :ivar kerberos_kdc_address: [Required] IP Address or DNS HostName. Required.
+ :vartype kerberos_kdc_address: str
+ :ivar kerberos_principal: [Required] Kerberos Username. Required.
+ :vartype kerberos_principal: str
+ :ivar kerberos_realm: [Required] Domain over which a Kerberos authentication server has the
+ authority to authenticate a user, host or service. Required.
+ :vartype kerberos_realm: str
+ :ivar secrets: [Required] Keytab secrets. Required.
+ :vartype secrets: ~azure.mgmt.machinelearningservices.models.KerberosKeytabSecrets
+ """
+
+ _validation = {
+ "credentials_type": {"required": True},
+ "kerberos_kdc_address": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "kerberos_principal": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "kerberos_realm": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "secrets": {"required": True},
+ }
+
+ _attribute_map = {
+ "credentials_type": {"key": "credentialsType", "type": "str"},
+ "kerberos_kdc_address": {"key": "kerberosKdcAddress", "type": "str"},
+ "kerberos_principal": {"key": "kerberosPrincipal", "type": "str"},
+ "kerberos_realm": {"key": "kerberosRealm", "type": "str"},
+ "secrets": {"key": "secrets", "type": "KerberosKeytabSecrets"},
+ }
+
+ def __init__(
+ self,
+ *,
+ kerberos_kdc_address: str,
+ kerberos_principal: str,
+ kerberos_realm: str,
+ secrets: "_models.KerberosKeytabSecrets",
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword kerberos_kdc_address: [Required] IP Address or DNS HostName. Required.
+ :paramtype kerberos_kdc_address: str
+ :keyword kerberos_principal: [Required] Kerberos Username. Required.
+ :paramtype kerberos_principal: str
+ :keyword kerberos_realm: [Required] Domain over which a Kerberos authentication server has the
+ authority to authenticate a user, host or service. Required.
+ :paramtype kerberos_realm: str
+ :keyword secrets: [Required] Keytab secrets. Required.
+ :paramtype secrets: ~azure.mgmt.machinelearningservices.models.KerberosKeytabSecrets
+ """
+ super().__init__(
+ kerberos_kdc_address=kerberos_kdc_address,
+ kerberos_principal=kerberos_principal,
+ kerberos_realm=kerberos_realm,
+ **kwargs
+ )
+ self.credentials_type: str = "KerberosKeytab"
+ self.secrets = secrets
+ self.kerberos_kdc_address = kerberos_kdc_address
+ self.kerberos_principal = kerberos_principal
+ self.kerberos_realm = kerberos_realm
+
+
+class KerberosKeytabSecrets(DatastoreSecrets):
+ """KerberosKeytabSecrets.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar secrets_type: [Required] Credential type used to authenticate with storage. Required.
+ Known values are: "AccountKey", "Certificate", "Sas", "ServicePrincipal", "KerberosPassword",
+ and "KerberosKeytab".
+ :vartype secrets_type: str or ~azure.mgmt.machinelearningservices.models.SecretsType
+ :ivar kerberos_keytab: Kerberos keytab secret.
+ :vartype kerberos_keytab: str
+ """
+
+ _validation = {
+ "secrets_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "secrets_type": {"key": "secretsType", "type": "str"},
+ "kerberos_keytab": {"key": "kerberosKeytab", "type": "str"},
+ }
+
+ def __init__(self, *, kerberos_keytab: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword kerberos_keytab: Kerberos keytab secret.
+ :paramtype kerberos_keytab: str
+ """
+ super().__init__(**kwargs)
+ self.secrets_type: str = "KerberosKeytab"
+ self.kerberos_keytab = kerberos_keytab
+
+
+class KerberosPasswordCredentials(KerberosCredentials, DatastoreCredentials):
+ """KerberosPasswordCredentials.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar credentials_type: [Required] Credential type used to authenticate with storage.
+ Required. Known values are: "AccountKey", "Certificate", "None", "Sas", "ServicePrincipal",
+ "KerberosKeytab", and "KerberosPassword".
+ :vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
+ :ivar kerberos_kdc_address: [Required] IP Address or DNS HostName. Required.
+ :vartype kerberos_kdc_address: str
+ :ivar kerberos_principal: [Required] Kerberos Username. Required.
+ :vartype kerberos_principal: str
+ :ivar kerberos_realm: [Required] Domain over which a Kerberos authentication server has the
+ authority to authenticate a user, host or service. Required.
+ :vartype kerberos_realm: str
+ :ivar secrets: [Required] Kerberos password secrets. Required.
+ :vartype secrets: ~azure.mgmt.machinelearningservices.models.KerberosPasswordSecrets
+ """
+
+ _validation = {
+ "credentials_type": {"required": True},
+ "kerberos_kdc_address": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "kerberos_principal": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "kerberos_realm": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "secrets": {"required": True},
+ }
+
+ _attribute_map = {
+ "credentials_type": {"key": "credentialsType", "type": "str"},
+ "kerberos_kdc_address": {"key": "kerberosKdcAddress", "type": "str"},
+ "kerberos_principal": {"key": "kerberosPrincipal", "type": "str"},
+ "kerberos_realm": {"key": "kerberosRealm", "type": "str"},
+ "secrets": {"key": "secrets", "type": "KerberosPasswordSecrets"},
+ }
+
+ def __init__(
+ self,
+ *,
+ kerberos_kdc_address: str,
+ kerberos_principal: str,
+ kerberos_realm: str,
+ secrets: "_models.KerberosPasswordSecrets",
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword kerberos_kdc_address: [Required] IP Address or DNS HostName. Required.
+ :paramtype kerberos_kdc_address: str
+ :keyword kerberos_principal: [Required] Kerberos Username. Required.
+ :paramtype kerberos_principal: str
+ :keyword kerberos_realm: [Required] Domain over which a Kerberos authentication server has the
+ authority to authenticate a user, host or service. Required.
+ :paramtype kerberos_realm: str
+ :keyword secrets: [Required] Kerberos password secrets. Required.
+ :paramtype secrets: ~azure.mgmt.machinelearningservices.models.KerberosPasswordSecrets
+ """
+ super().__init__(
+ kerberos_kdc_address=kerberos_kdc_address,
+ kerberos_principal=kerberos_principal,
+ kerberos_realm=kerberos_realm,
+ **kwargs
+ )
+ self.credentials_type: str = "KerberosPassword"
+ self.secrets = secrets
+ self.kerberos_kdc_address = kerberos_kdc_address
+ self.kerberos_principal = kerberos_principal
+ self.kerberos_realm = kerberos_realm
+
+
+class KerberosPasswordSecrets(DatastoreSecrets):
+ """KerberosPasswordSecrets.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar secrets_type: [Required] Credential type used to authenticate with storage. Required.
+ Known values are: "AccountKey", "Certificate", "Sas", "ServicePrincipal", "KerberosPassword",
+ and "KerberosKeytab".
+ :vartype secrets_type: str or ~azure.mgmt.machinelearningservices.models.SecretsType
+ :ivar kerberos_password: Kerberos password secret.
+ :vartype kerberos_password: str
+ """
+
+ _validation = {
+ "secrets_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "secrets_type": {"key": "secretsType", "type": "str"},
+ "kerberos_password": {"key": "kerberosPassword", "type": "str"},
+ }
+
+ def __init__(self, *, kerberos_password: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword kerberos_password: Kerberos password secret.
+ :paramtype kerberos_password: str
+ """
+ super().__init__(**kwargs)
+ self.secrets_type: str = "KerberosPassword"
+ self.kerberos_password = kerberos_password
+
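A sketch of KerberosPasswordCredentials built from the models in this section; the KDC address, principal, realm, and password placeholder are illustrative assumptions, and in practice the password would come from a secret store.

```python
from azure.mgmt.machinelearningservices import models

# All four fields are required on the generated model.
credentials = models.KerberosPasswordCredentials(
    kerberos_kdc_address="kdc.contoso.com",
    kerberos_principal="mlworkspace",
    kerberos_realm="CONTOSO.COM",
    secrets=models.KerberosPasswordSecrets(kerberos_password="<password>"),
)
```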
+
+class KeyVaultProperties(_serialization.Model):
+ """Customer Key vault properties.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar identity_client_id: Currently, we support only SystemAssigned MSI.
+ We need this when we support UserAssignedIdentities.
+ :vartype identity_client_id: str
+ :ivar key_identifier: KeyVault key identifier to encrypt the data. Required.
+ :vartype key_identifier: str
+ :ivar key_vault_arm_id: KeyVault Arm Id that contains the data encryption key. Required.
+ :vartype key_vault_arm_id: str
+ """
+
+ _validation = {
+ "key_identifier": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "key_vault_arm_id": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
+ _attribute_map = {
+ "identity_client_id": {"key": "identityClientId", "type": "str"},
+ "key_identifier": {"key": "keyIdentifier", "type": "str"},
+ "key_vault_arm_id": {"key": "keyVaultArmId", "type": "str"},
+ }
+
+ def __init__(
+ self, *, key_identifier: str, key_vault_arm_id: str, identity_client_id: Optional[str] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword identity_client_id: Currently, we support only SystemAssigned MSI.
+ We need this when we support UserAssignedIdentities.
+ :paramtype identity_client_id: str
+ :keyword key_identifier: KeyVault key identifier to encrypt the data. Required.
+ :paramtype key_identifier: str
+ :keyword key_vault_arm_id: KeyVault Arm Id that contains the data encryption key. Required.
+ :paramtype key_vault_arm_id: str
+ """
+ super().__init__(**kwargs)
+ self.identity_client_id = identity_client_id
+ self.key_identifier = key_identifier
+ self.key_vault_arm_id = key_vault_arm_id
+
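A hedged sketch of KeyVaultProperties for customer-managed keys; the vault name, key identifier, and ARM id are placeholders, not values from this diff.

```python
from azure.mgmt.machinelearningservices import models

# key_identifier and key_vault_arm_id are required; identity_client_id is optional.
cmk = models.KeyVaultProperties(
    key_identifier="https://my-vault.vault.azure.net/keys/my-key/<key-version>",
    key_vault_arm_id=(
        "/subscriptions/<sub-id>/resourceGroups/<rg>/providers/Microsoft.KeyVault/vaults/my-vault"
    ),
)
```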
+
+class KubernetesSchema(_serialization.Model):
+ """Kubernetes Compute Schema.
+
+ :ivar properties: Properties of Kubernetes.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.KubernetesProperties
+ """
+
+ _attribute_map = {
+ "properties": {"key": "properties", "type": "KubernetesProperties"},
+ }
+
+ def __init__(self, *, properties: Optional["_models.KubernetesProperties"] = None, **kwargs: Any) -> None:
+ """
+ :keyword properties: Properties of Kubernetes.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.KubernetesProperties
+ """
+ super().__init__(**kwargs)
+ self.properties = properties
+
+
+class Kubernetes(Compute, KubernetesSchema): # pylint: disable=too-many-instance-attributes
+ """A Machine Learning compute based on Kubernetes Compute.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar properties: Properties of Kubernetes.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.KubernetesProperties
+ :ivar compute_type: The type of compute. Required. Known values are: "AKS", "Kubernetes",
+ "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight", "Databricks",
+ "DataLakeAnalytics", and "SynapseSpark".
+ :vartype compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
+ :ivar compute_location: Location for the underlying compute.
+ :vartype compute_location: str
+ :ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
+ Updating, Provisioning, Succeeded, and Failed. Known values are: "Unknown", "Updating",
+ "Creating", "Deleting", "Succeeded", "Failed", and "Canceled".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.ProvisioningState
+ :ivar description: The description of the Machine Learning compute.
+ :vartype description: str
+ :ivar created_on: The time at which the compute was created.
+ :vartype created_on: ~datetime.datetime
+ :ivar modified_on: The time at which the compute was last modified.
+ :vartype modified_on: ~datetime.datetime
+ :ivar resource_id: ARM resource id of the underlying compute.
+ :vartype resource_id: str
+ :ivar provisioning_errors: Errors during provisioning.
+ :vartype provisioning_errors: list[~azure.mgmt.machinelearningservices.models.ErrorResponse]
+ :ivar is_attached_compute: Indicates whether the compute was provisioned by the user and brought
+ from outside if true, or provisioned by the machine learning service if false.
+ :vartype is_attached_compute: bool
+ :ivar disable_local_auth: Opt-out of local authentication and ensure customers can use only MSI
+ and AAD for authentication.
+ :vartype disable_local_auth: bool
+ """
+
+ _validation = {
+ "compute_type": {"required": True},
+ "provisioning_state": {"readonly": True},
+ "created_on": {"readonly": True},
+ "modified_on": {"readonly": True},
+ "provisioning_errors": {"readonly": True},
+ "is_attached_compute": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "properties": {"key": "properties", "type": "KubernetesProperties"},
+ "compute_type": {"key": "computeType", "type": "str"},
+ "compute_location": {"key": "computeLocation", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "description": {"key": "description", "type": "str"},
+ "created_on": {"key": "createdOn", "type": "iso-8601"},
+ "modified_on": {"key": "modifiedOn", "type": "iso-8601"},
+ "resource_id": {"key": "resourceId", "type": "str"},
+ "provisioning_errors": {"key": "provisioningErrors", "type": "[ErrorResponse]"},
+ "is_attached_compute": {"key": "isAttachedCompute", "type": "bool"},
+ "disable_local_auth": {"key": "disableLocalAuth", "type": "bool"},
+ }
+
+ def __init__(
+ self,
+ *,
+ properties: Optional["_models.KubernetesProperties"] = None,
+ compute_location: Optional[str] = None,
+ description: Optional[str] = None,
+ resource_id: Optional[str] = None,
+ disable_local_auth: Optional[bool] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword properties: Properties of Kubernetes.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.KubernetesProperties
+ :keyword compute_location: Location for the underlying compute.
+ :paramtype compute_location: str
+ :keyword description: The description of the Machine Learning compute.
+ :paramtype description: str
+ :keyword resource_id: ARM resource id of the underlying compute.
+ :paramtype resource_id: str
+ :keyword disable_local_auth: Opt-out of local authentication and ensure customers can use only
+ MSI and AAD for authentication.
+ :paramtype disable_local_auth: bool
+ """
+ super().__init__(
+ compute_location=compute_location,
+ description=description,
+ resource_id=resource_id,
+ disable_local_auth=disable_local_auth,
+ properties=properties,
+ **kwargs
+ )
+ self.properties = properties
+ self.compute_type: str = "Kubernetes"
+ self.compute_location = compute_location
+ self.provisioning_state = None
+ self.description = description
+ self.created_on = None
+ self.modified_on = None
+ self.resource_id = resource_id
+ self.provisioning_errors = None
+ self.is_attached_compute = None
+ self.disable_local_auth = disable_local_auth
+
+
+class OnlineDeploymentProperties(EndpointDeploymentPropertiesBase): # pylint: disable=too-many-instance-attributes
+ """OnlineDeploymentProperties.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ KubernetesOnlineDeployment, ManagedOnlineDeployment
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar code_configuration: Code configuration for the endpoint deployment.
+ :vartype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
+ :ivar description: Description of the endpoint deployment.
+ :vartype description: str
+ :ivar environment_id: ARM resource ID of the environment specification for the endpoint
+ deployment.
+ :vartype environment_id: str
+ :ivar environment_variables: Environment variables configuration for the deployment.
+ :vartype environment_variables: dict[str, str]
+ :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
+ :vartype properties: dict[str, str]
+ :ivar app_insights_enabled: If true, enables Application Insights logging.
+ :vartype app_insights_enabled: bool
+ :ivar data_collector: The mdc configuration; we disable mdc when it's null.
+ :vartype data_collector: ~azure.mgmt.machinelearningservices.models.DataCollector
+ :ivar egress_public_network_access: If Enabled, allow egress public network access. If
+ Disabled, this will create secure egress. Default: Enabled. Known values are: "Enabled" and
+ "Disabled".
+ :vartype egress_public_network_access: str or
+ ~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
+ :ivar endpoint_compute_type: [Required] The compute type of the endpoint. Required. Known
+ values are: "Managed", "Kubernetes", and "AzureMLCompute".
+ :vartype endpoint_compute_type: str or
+ ~azure.mgmt.machinelearningservices.models.EndpointComputeType
+ :ivar instance_type: Compute instance type.
+ :vartype instance_type: str
+ :ivar liveness_probe: Liveness probe monitors the health of the container regularly.
+ :vartype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
+ :ivar model: The URI path to the model.
+ :vartype model: str
+ :ivar model_mount_path: The path to mount the model in the custom container.
+ :vartype model_mount_path: str
+ :ivar provisioning_state: Provisioning state for the endpoint deployment. Known values are:
+ "Creating", "Deleting", "Scaling", "Updating", "Succeeded", "Failed", and "Canceled".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.DeploymentProvisioningState
+ :ivar readiness_probe: Readiness probe validates if the container is ready to serve traffic.
+ The properties and defaults are the same as the liveness probe.
+ :vartype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
+ :ivar request_settings: Request settings for the deployment.
+ :vartype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
+ :ivar scale_settings: Scale settings for the deployment.
+ If it is null or not provided,
+ it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
+ and to DefaultScaleSettings for ManagedOnlineDeployment.
+ :vartype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
+ """
+
+ _validation = {
+ "endpoint_compute_type": {"required": True},
+ "provisioning_state": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "code_configuration": {"key": "codeConfiguration", "type": "CodeConfiguration"},
+ "description": {"key": "description", "type": "str"},
+ "environment_id": {"key": "environmentId", "type": "str"},
+ "environment_variables": {"key": "environmentVariables", "type": "{str}"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "app_insights_enabled": {"key": "appInsightsEnabled", "type": "bool"},
+ "data_collector": {"key": "dataCollector", "type": "DataCollector"},
+ "egress_public_network_access": {"key": "egressPublicNetworkAccess", "type": "str"},
+ "endpoint_compute_type": {"key": "endpointComputeType", "type": "str"},
+ "instance_type": {"key": "instanceType", "type": "str"},
+ "liveness_probe": {"key": "livenessProbe", "type": "ProbeSettings"},
+ "model": {"key": "model", "type": "str"},
+ "model_mount_path": {"key": "modelMountPath", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "readiness_probe": {"key": "readinessProbe", "type": "ProbeSettings"},
+ "request_settings": {"key": "requestSettings", "type": "OnlineRequestSettings"},
+ "scale_settings": {"key": "scaleSettings", "type": "OnlineScaleSettings"},
+ }
+
+ _subtype_map = {
+ "endpoint_compute_type": {"Kubernetes": "KubernetesOnlineDeployment", "Managed": "ManagedOnlineDeployment"}
+ }
+
+ def __init__(
+ self,
+ *,
+ code_configuration: Optional["_models.CodeConfiguration"] = None,
+ description: Optional[str] = None,
+ environment_id: Optional[str] = None,
+ environment_variables: Optional[Dict[str, str]] = None,
+ properties: Optional[Dict[str, str]] = None,
+ app_insights_enabled: bool = False,
+ data_collector: Optional["_models.DataCollector"] = None,
+ egress_public_network_access: Optional[Union[str, "_models.EgressPublicNetworkAccessType"]] = None,
+ instance_type: Optional[str] = None,
+ liveness_probe: Optional["_models.ProbeSettings"] = None,
+ model: Optional[str] = None,
+ model_mount_path: Optional[str] = None,
+ readiness_probe: Optional["_models.ProbeSettings"] = None,
+ request_settings: Optional["_models.OnlineRequestSettings"] = None,
+ scale_settings: Optional["_models.OnlineScaleSettings"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword code_configuration: Code configuration for the endpoint deployment.
+ :paramtype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
+ :keyword description: Description of the endpoint deployment.
+ :paramtype description: str
+ :keyword environment_id: ARM resource ID of the environment specification for the endpoint
+ deployment.
+ :paramtype environment_id: str
+ :keyword environment_variables: Environment variables configuration for the deployment.
+ :paramtype environment_variables: dict[str, str]
+ :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
+ :paramtype properties: dict[str, str]
+ :keyword app_insights_enabled: If true, enables Application Insights logging.
+ :paramtype app_insights_enabled: bool
+ :keyword data_collector: The mdc configuration; we disable mdc when it's null.
+ :paramtype data_collector: ~azure.mgmt.machinelearningservices.models.DataCollector
+ :keyword egress_public_network_access: If Enabled, allow egress public network access. If
+ Disabled, this will create secure egress. Default: Enabled. Known values are: "Enabled" and
+ "Disabled".
+ :paramtype egress_public_network_access: str or
+ ~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
+ :keyword instance_type: Compute instance type.
+ :paramtype instance_type: str
+ :keyword liveness_probe: Liveness probe monitors the health of the container regularly.
+ :paramtype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
+ :keyword model: The URI path to the model.
+ :paramtype model: str
+ :keyword model_mount_path: The path to mount the model in the custom container.
+ :paramtype model_mount_path: str
+ :keyword readiness_probe: Readiness probe validates if the container is ready to serve traffic.
+ The properties and defaults are the same as the liveness probe.
+ :paramtype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
+ :keyword request_settings: Request settings for the deployment.
+ :paramtype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
+ :keyword scale_settings: Scale settings for the deployment.
+ If it is null or not provided,
+ it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
+ and to DefaultScaleSettings for ManagedOnlineDeployment.
+ :paramtype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
+ """
+ super().__init__(
+ code_configuration=code_configuration,
+ description=description,
+ environment_id=environment_id,
+ environment_variables=environment_variables,
+ properties=properties,
+ **kwargs
+ )
+ self.app_insights_enabled = app_insights_enabled
+ self.data_collector = data_collector
+ self.egress_public_network_access = egress_public_network_access
+ self.endpoint_compute_type: Optional[str] = None
+ self.instance_type = instance_type
+ self.liveness_probe = liveness_probe
+ self.model = model
+ self.model_mount_path = model_mount_path
+ self.provisioning_state = None
+ self.readiness_probe = readiness_probe
+ self.request_settings = request_settings
+ self.scale_settings = scale_settings
+
+
+class KubernetesOnlineDeployment(OnlineDeploymentProperties): # pylint: disable=too-many-instance-attributes
+ """Properties specific to a KubernetesOnlineDeployment.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar code_configuration: Code configuration for the endpoint deployment.
+ :vartype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
+ :ivar description: Description of the endpoint deployment.
+ :vartype description: str
+ :ivar environment_id: ARM resource ID of the environment specification for the endpoint
+ deployment.
+ :vartype environment_id: str
+ :ivar environment_variables: Environment variables configuration for the deployment.
+ :vartype environment_variables: dict[str, str]
+ :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
+ :vartype properties: dict[str, str]
+ :ivar app_insights_enabled: If true, enables Application Insights logging.
+ :vartype app_insights_enabled: bool
+ :ivar data_collector: The mdc configuration; we disable mdc when it's null.
+ :vartype data_collector: ~azure.mgmt.machinelearningservices.models.DataCollector
+ :ivar egress_public_network_access: If Enabled, allow egress public network access. If
+ Disabled, this will create secure egress. Default: Enabled. Known values are: "Enabled" and
+ "Disabled".
+ :vartype egress_public_network_access: str or
+ ~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
+ :ivar endpoint_compute_type: [Required] The compute type of the endpoint. Required. Known
+ values are: "Managed", "Kubernetes", and "AzureMLCompute".
+ :vartype endpoint_compute_type: str or
+ ~azure.mgmt.machinelearningservices.models.EndpointComputeType
+ :ivar instance_type: Compute instance type.
+ :vartype instance_type: str
+ :ivar liveness_probe: Liveness probe monitors the health of the container regularly.
+ :vartype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
+ :ivar model: The URI path to the model.
+ :vartype model: str
+ :ivar model_mount_path: The path to mount the model in the custom container.
+ :vartype model_mount_path: str
+ :ivar provisioning_state: Provisioning state for the endpoint deployment. Known values are:
+ "Creating", "Deleting", "Scaling", "Updating", "Succeeded", "Failed", and "Canceled".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.DeploymentProvisioningState
+ :ivar readiness_probe: Readiness probe validates if the container is ready to serve traffic.
+ The properties and defaults are the same as the liveness probe.
+ :vartype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
+ :ivar request_settings: Request settings for the deployment.
+ :vartype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
+ :ivar scale_settings: Scale settings for the deployment.
+ If it is null or not provided,
+ it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
+ and to DefaultScaleSettings for ManagedOnlineDeployment.
+ :vartype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
+ :ivar container_resource_requirements: The resource requirements for the container (cpu and
+ memory).
+ :vartype container_resource_requirements:
+ ~azure.mgmt.machinelearningservices.models.ContainerResourceRequirements
+ """
+
+ _validation = {
+ "endpoint_compute_type": {"required": True},
+ "provisioning_state": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "code_configuration": {"key": "codeConfiguration", "type": "CodeConfiguration"},
+ "description": {"key": "description", "type": "str"},
+ "environment_id": {"key": "environmentId", "type": "str"},
+ "environment_variables": {"key": "environmentVariables", "type": "{str}"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "app_insights_enabled": {"key": "appInsightsEnabled", "type": "bool"},
+ "data_collector": {"key": "dataCollector", "type": "DataCollector"},
+ "egress_public_network_access": {"key": "egressPublicNetworkAccess", "type": "str"},
+ "endpoint_compute_type": {"key": "endpointComputeType", "type": "str"},
+ "instance_type": {"key": "instanceType", "type": "str"},
+ "liveness_probe": {"key": "livenessProbe", "type": "ProbeSettings"},
+ "model": {"key": "model", "type": "str"},
+ "model_mount_path": {"key": "modelMountPath", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "readiness_probe": {"key": "readinessProbe", "type": "ProbeSettings"},
+ "request_settings": {"key": "requestSettings", "type": "OnlineRequestSettings"},
+ "scale_settings": {"key": "scaleSettings", "type": "OnlineScaleSettings"},
+ "container_resource_requirements": {
+ "key": "containerResourceRequirements",
+ "type": "ContainerResourceRequirements",
+ },
+ }
+
+ def __init__(
+ self,
+ *,
+ code_configuration: Optional["_models.CodeConfiguration"] = None,
+ description: Optional[str] = None,
+ environment_id: Optional[str] = None,
+ environment_variables: Optional[Dict[str, str]] = None,
+ properties: Optional[Dict[str, str]] = None,
+ app_insights_enabled: bool = False,
+ data_collector: Optional["_models.DataCollector"] = None,
+ egress_public_network_access: Optional[Union[str, "_models.EgressPublicNetworkAccessType"]] = None,
+ instance_type: Optional[str] = None,
+ liveness_probe: Optional["_models.ProbeSettings"] = None,
+ model: Optional[str] = None,
+ model_mount_path: Optional[str] = None,
+ readiness_probe: Optional["_models.ProbeSettings"] = None,
+ request_settings: Optional["_models.OnlineRequestSettings"] = None,
+ scale_settings: Optional["_models.OnlineScaleSettings"] = None,
+ container_resource_requirements: Optional["_models.ContainerResourceRequirements"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword code_configuration: Code configuration for the endpoint deployment.
+ :paramtype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
+ :keyword description: Description of the endpoint deployment.
+ :paramtype description: str
+ :keyword environment_id: ARM resource ID of the environment specification for the endpoint
+ deployment.
+ :paramtype environment_id: str
+ :keyword environment_variables: Environment variables configuration for the deployment.
+ :paramtype environment_variables: dict[str, str]
+ :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
+ :paramtype properties: dict[str, str]
+ :keyword app_insights_enabled: If true, enables Application Insights logging.
+ :paramtype app_insights_enabled: bool
+ :keyword data_collector: The mdc configuration; we disable mdc when it's null.
+ :paramtype data_collector: ~azure.mgmt.machinelearningservices.models.DataCollector
+ :keyword egress_public_network_access: If Enabled, allow egress public network access. If
+ Disabled, this will create secure egress. Default: Enabled. Known values are: "Enabled" and
+ "Disabled".
+ :paramtype egress_public_network_access: str or
+ ~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
+ :keyword instance_type: Compute instance type.
+ :paramtype instance_type: str
+ :keyword liveness_probe: Liveness probe monitors the health of the container regularly.
+ :paramtype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
+ :keyword model: The URI path to the model.
+ :paramtype model: str
+ :keyword model_mount_path: The path to mount the model in the custom container.
+ :paramtype model_mount_path: str
+ :keyword readiness_probe: Readiness probe validates if the container is ready to serve traffic.
+ The properties and defaults are the same as the liveness probe.
+ :paramtype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
+ :keyword request_settings: Request settings for the deployment.
+ :paramtype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
+ :keyword scale_settings: Scale settings for the deployment.
+ If it is null or not provided,
+ it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
+ and to DefaultScaleSettings for ManagedOnlineDeployment.
+ :paramtype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
+ :keyword container_resource_requirements: The resource requirements for the container (cpu and
+ memory).
+ :paramtype container_resource_requirements:
+ ~azure.mgmt.machinelearningservices.models.ContainerResourceRequirements
+ """
+ super().__init__(
+ code_configuration=code_configuration,
+ description=description,
+ environment_id=environment_id,
+ environment_variables=environment_variables,
+ properties=properties,
+ app_insights_enabled=app_insights_enabled,
+ data_collector=data_collector,
+ egress_public_network_access=egress_public_network_access,
+ instance_type=instance_type,
+ liveness_probe=liveness_probe,
+ model=model,
+ model_mount_path=model_mount_path,
+ readiness_probe=readiness_probe,
+ request_settings=request_settings,
+ scale_settings=scale_settings,
+ **kwargs
+ )
+ self.endpoint_compute_type: str = "Kubernetes"
+ self.container_resource_requirements = container_resource_requirements
+
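A minimal sketch of KubernetesOnlineDeployment using only fields shown in this section; the model id and instance type name are assumptions for illustration.

```python
from azure.mgmt.machinelearningservices import models

# endpoint_compute_type is set to "Kubernetes" automatically by the subclass.
deployment = models.KubernetesOnlineDeployment(
    model="azureml:my-model:1",          # assumed model URI/id
    instance_type="defaultinstancetype",  # assumed Kubernetes instance type name
    app_insights_enabled=True,
)
```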
+
+class KubernetesProperties(_serialization.Model):
+ """Kubernetes properties.
+
+ :ivar relay_connection_string: Relay connection string.
+ :vartype relay_connection_string: str
+ :ivar service_bus_connection_string: ServiceBus connection string.
+ :vartype service_bus_connection_string: str
+ :ivar extension_principal_id: Extension principal-id.
+ :vartype extension_principal_id: str
+ :ivar extension_instance_release_train: Extension instance release train.
+ :vartype extension_instance_release_train: str
+ :ivar vc_name: VC name.
+ :vartype vc_name: str
+ :ivar namespace: Compute namespace.
+ :vartype namespace: str
+ :ivar default_instance_type: Default instance type.
+ :vartype default_instance_type: str
+ :ivar instance_types: Instance Type Schema.
+ :vartype instance_types: dict[str,
+ ~azure.mgmt.machinelearningservices.models.InstanceTypeSchema]
+ """
+
+ _attribute_map = {
+ "relay_connection_string": {"key": "relayConnectionString", "type": "str"},
+ "service_bus_connection_string": {"key": "serviceBusConnectionString", "type": "str"},
+ "extension_principal_id": {"key": "extensionPrincipalId", "type": "str"},
+ "extension_instance_release_train": {"key": "extensionInstanceReleaseTrain", "type": "str"},
+ "vc_name": {"key": "vcName", "type": "str"},
+ "namespace": {"key": "namespace", "type": "str"},
+ "default_instance_type": {"key": "defaultInstanceType", "type": "str"},
+ "instance_types": {"key": "instanceTypes", "type": "{InstanceTypeSchema}"},
+ }
+
+ def __init__(
+ self,
+ *,
+ relay_connection_string: Optional[str] = None,
+ service_bus_connection_string: Optional[str] = None,
+ extension_principal_id: Optional[str] = None,
+ extension_instance_release_train: Optional[str] = None,
+ vc_name: Optional[str] = None,
+ namespace: str = "default",
+ default_instance_type: Optional[str] = None,
+ instance_types: Optional[Dict[str, "_models.InstanceTypeSchema"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword relay_connection_string: Relay connection string.
+ :paramtype relay_connection_string: str
+ :keyword service_bus_connection_string: ServiceBus connection string.
+ :paramtype service_bus_connection_string: str
+ :keyword extension_principal_id: Extension principal-id.
+ :paramtype extension_principal_id: str
+ :keyword extension_instance_release_train: Extension instance release train.
+ :paramtype extension_instance_release_train: str
+ :keyword vc_name: VC name.
+ :paramtype vc_name: str
+ :keyword namespace: Compute namespace.
+ :paramtype namespace: str
+ :keyword default_instance_type: Default instance type.
+ :paramtype default_instance_type: str
+ :keyword instance_types: Instance Type Schema.
+ :paramtype instance_types: dict[str,
+ ~azure.mgmt.machinelearningservices.models.InstanceTypeSchema]
+ """
+ super().__init__(**kwargs)
+ self.relay_connection_string = relay_connection_string
+ self.service_bus_connection_string = service_bus_connection_string
+ self.extension_principal_id = extension_principal_id
+ self.extension_instance_release_train = extension_instance_release_train
+ self.vc_name = vc_name
+ self.namespace = namespace
+ self.default_instance_type = default_instance_type
+ self.instance_types = instance_types
+
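+# Illustrative sketch (not part of the generated code): a KubernetesProperties
+# instance. The instance type name is a hypothetical value; "namespace" falls
+# back to "default" when omitted, and InstanceTypeSchema is assumed here to
+# accept no required constructor arguments.
+#
+#   k8s_props = KubernetesProperties(
+#       default_instance_type="cpu-small",
+#       instance_types={"cpu-small": InstanceTypeSchema()},
+#   )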
+
+class LabelCategory(_serialization.Model):
+ """Label category definition.
+
+ :ivar classes: Dictionary of label classes in this category.
+ :vartype classes: dict[str, ~azure.mgmt.machinelearningservices.models.LabelClass]
+ :ivar display_name: Display name of the label category.
+ :vartype display_name: str
+ :ivar multi_select: Indicates whether it is allowed to select multiple classes in this
+ category. Known values are: "Enabled" and "Disabled".
+ :vartype multi_select: str or ~azure.mgmt.machinelearningservices.models.MultiSelect
+ """
+
+ _attribute_map = {
+ "classes": {"key": "classes", "type": "{LabelClass}"},
+ "display_name": {"key": "displayName", "type": "str"},
+ "multi_select": {"key": "multiSelect", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ classes: Optional[Dict[str, "_models.LabelClass"]] = None,
+ display_name: Optional[str] = None,
+ multi_select: Optional[Union[str, "_models.MultiSelect"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword classes: Dictionary of label classes in this category.
+ :paramtype classes: dict[str, ~azure.mgmt.machinelearningservices.models.LabelClass]
+ :keyword display_name: Display name of the label category.
+ :paramtype display_name: str
+ :keyword multi_select: Indicates whether it is allowed to select multiple classes in this
+ category. Known values are: "Enabled" and "Disabled".
+ :paramtype multi_select: str or ~azure.mgmt.machinelearningservices.models.MultiSelect
+ """
+ super().__init__(**kwargs)
+ self.classes = classes
+ self.display_name = display_name
+ self.multi_select = multi_select
+
+
+class LabelClass(_serialization.Model):
+ """Label class definition.
+
+ :ivar display_name: Display name of the label class.
+ :vartype display_name: str
+ :ivar subclasses: Dictionary of subclasses of the label class.
+ :vartype subclasses: dict[str, ~azure.mgmt.machinelearningservices.models.LabelClass]
+ """
+
+ _attribute_map = {
+ "display_name": {"key": "displayName", "type": "str"},
+ "subclasses": {"key": "subclasses", "type": "{LabelClass}"},
+ }
+
+ def __init__(
+ self,
+ *,
+ display_name: Optional[str] = None,
+ subclasses: Optional[Dict[str, "_models.LabelClass"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword display_name: Display name of the label class.
+ :paramtype display_name: str
+ :keyword subclasses: Dictionary of subclasses of the label class.
+ :paramtype subclasses: dict[str, ~azure.mgmt.machinelearningservices.models.LabelClass]
+ """
+ super().__init__(**kwargs)
+ self.display_name = display_name
+ self.subclasses = subclasses
+
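+# Illustrative sketch (not part of the generated code): a small label taxonomy
+# built from LabelCategory and LabelClass. Category and class names are
+# hypothetical.
+#
+#   categories = {
+#       "animal": LabelCategory(
+#           display_name="Animal",
+#           multi_select="Disabled",
+#           classes={
+#               "cat": LabelClass(display_name="Cat"),
+#               "dog": LabelClass(
+#                   display_name="Dog",
+#                   subclasses={"puppy": LabelClass(display_name="Puppy")},
+#               ),
+#           },
+#       ),
+#   }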
+
+class LabelingDataConfiguration(_serialization.Model):
+ """Labeling data configuration definition.
+
+ :ivar data_id: Resource Id of the data asset to perform labeling.
+ :vartype data_id: str
+ :ivar incremental_data_refresh: Indicates whether to enable incremental data refresh. Known
+ values are: "Enabled" and "Disabled".
+ :vartype incremental_data_refresh: str or
+ ~azure.mgmt.machinelearningservices.models.IncrementalDataRefresh
+ """
+
+ _attribute_map = {
+ "data_id": {"key": "dataId", "type": "str"},
+ "incremental_data_refresh": {"key": "incrementalDataRefresh", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ data_id: Optional[str] = None,
+ incremental_data_refresh: Optional[Union[str, "_models.IncrementalDataRefresh"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword data_id: Resource Id of the data asset to perform labeling.
+ :paramtype data_id: str
+ :keyword incremental_data_refresh: Indicates whether to enable incremental data refresh. Known
+ values are: "Enabled" and "Disabled".
+ :paramtype incremental_data_refresh: str or
+ ~azure.mgmt.machinelearningservices.models.IncrementalDataRefresh
+ """
+ super().__init__(**kwargs)
+ self.data_id = data_id
+ self.incremental_data_refresh = incremental_data_refresh
+
+
+class LabelingJob(ProxyResource):
+ """Azure Resource Manager resource envelope.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.LabelingJobProperties
+ """
+
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "properties": {"required": True},
+ }
+
+ _attribute_map = {
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "properties": {"key": "properties", "type": "LabelingJobProperties"},
+ }
+
+ def __init__(self, *, properties: "_models.LabelingJobProperties", **kwargs: Any) -> None:
+ """
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.LabelingJobProperties
+ """
+ super().__init__(**kwargs)
+ self.properties = properties
+
+
+class LabelingJobMediaProperties(_serialization.Model):
+ """Properties of a labeling job.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ LabelingJobImageProperties, LabelingJobTextProperties
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar media_type: [Required] Media type of the job. Required. Known values are: "Image" and
+ "Text".
+ :vartype media_type: str or ~azure.mgmt.machinelearningservices.models.MediaType
+ """
+
+ _validation = {
+ "media_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "media_type": {"key": "mediaType", "type": "str"},
+ }
+
+ _subtype_map = {"media_type": {"Image": "LabelingJobImageProperties", "Text": "LabelingJobTextProperties"}}
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.media_type: Optional[str] = None
+
+
+class LabelingJobImageProperties(LabelingJobMediaProperties):
+ """Properties of a labeling job for image data.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar media_type: [Required] Media type of the job. Required. Known values are: "Image" and
+ "Text".
+ :vartype media_type: str or ~azure.mgmt.machinelearningservices.models.MediaType
+ :ivar annotation_type: Annotation type of image labeling job. Known values are:
+ "Classification", "BoundingBox", and "InstanceSegmentation".
+ :vartype annotation_type: str or ~azure.mgmt.machinelearningservices.models.ImageAnnotationType
+ """
+
+ _validation = {
+ "media_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "media_type": {"key": "mediaType", "type": "str"},
+ "annotation_type": {"key": "annotationType", "type": "str"},
+ }
+
+ def __init__(
+ self, *, annotation_type: Optional[Union[str, "_models.ImageAnnotationType"]] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword annotation_type: Annotation type of image labeling job. Known values are:
+ "Classification", "BoundingBox", and "InstanceSegmentation".
+ :paramtype annotation_type: str or
+ ~azure.mgmt.machinelearningservices.models.ImageAnnotationType
+ """
+ super().__init__(**kwargs)
+ self.media_type: str = "Image"
+ self.annotation_type = annotation_type
+
+
+class LabelingJobInstructions(_serialization.Model):
+ """Instructions for labeling job.
+
+ :ivar uri: The link to a page with detailed labeling instructions for labelers.
+ :vartype uri: str
+ """
+
+ _attribute_map = {
+ "uri": {"key": "uri", "type": "str"},
+ }
+
+ def __init__(self, *, uri: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword uri: The link to a page with detailed labeling instructions for labelers.
+ :paramtype uri: str
+ """
+ super().__init__(**kwargs)
+ self.uri = uri
+
+
+class LabelingJobProperties(JobBaseProperties): # pylint: disable=too-many-instance-attributes
+ """Labeling job definition.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar component_id: ARM resource ID of the component resource.
+ :vartype component_id: str
+ :ivar compute_id: ARM resource ID of the compute resource.
+ :vartype compute_id: str
+ :ivar display_name: Display name of job.
+ :vartype display_name: str
+ :ivar experiment_name: The name of the experiment the job belongs to. If not set, the job is
+ placed in the "Default" experiment.
+ :vartype experiment_name: str
+ :ivar identity: Identity configuration. If set, this should be one of AmlToken,
+ ManagedIdentity, UserIdentity or null.
+ Defaults to AmlToken if null.
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
+ :ivar is_archived: Is the asset archived?
+ :vartype is_archived: bool
+ :ivar job_type: [Required] Specifies the type of job. Required. Known values are: "AutoML",
+ "Command", "Labeling", "Sweep", "Pipeline", and "Spark".
+ :vartype job_type: str or ~azure.mgmt.machinelearningservices.models.JobType
+ :ivar notification_setting: Notification setting for the job.
+ :vartype notification_setting: ~azure.mgmt.machinelearningservices.models.NotificationSetting
+ :ivar secrets_configuration: Configuration for secrets to be made available during runtime.
+ :vartype secrets_configuration: dict[str,
+ ~azure.mgmt.machinelearningservices.models.SecretConfiguration]
+ :ivar services: List of JobEndpoints.
+ For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
+ :vartype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
+ :ivar status: Status of the job. Known values are: "NotStarted", "Starting", "Provisioning",
+ "Preparing", "Queued", "Running", "Finalizing", "CancelRequested", "Completed", "Failed",
+ "Canceled", "NotResponding", "Paused", "Unknown", and "Scheduled".
+ :vartype status: str or ~azure.mgmt.machinelearningservices.models.JobStatus
+ :ivar created_date_time: Created time of the job in UTC timezone.
+ :vartype created_date_time: ~datetime.datetime
+ :ivar data_configuration: Configuration of data used in the job.
+ :vartype data_configuration:
+ ~azure.mgmt.machinelearningservices.models.LabelingDataConfiguration
+ :ivar job_instructions: Labeling instructions of the job.
+ :vartype job_instructions: ~azure.mgmt.machinelearningservices.models.LabelingJobInstructions
+ :ivar label_categories: Label categories of the job.
+ :vartype label_categories: dict[str, ~azure.mgmt.machinelearningservices.models.LabelCategory]
+ :ivar labeling_job_media_properties: Media type specific properties in the job.
+ :vartype labeling_job_media_properties:
+ ~azure.mgmt.machinelearningservices.models.LabelingJobMediaProperties
+ :ivar ml_assist_configuration: Configuration of MLAssist feature in the job.
+ :vartype ml_assist_configuration:
+ ~azure.mgmt.machinelearningservices.models.MLAssistConfiguration
+ :ivar progress_metrics: Progress metrics of the job.
+ :vartype progress_metrics: ~azure.mgmt.machinelearningservices.models.ProgressMetrics
+ :ivar project_id: Internal id of the job (previously called project).
+ :vartype project_id: str
+ :ivar provisioning_state: Specifies the labeling job provisioning state. Known values are:
+ "Succeeded", "Failed", "Canceled", and "InProgress".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.JobProvisioningState
+ :ivar status_messages: Status messages of the job.
+ :vartype status_messages: list[~azure.mgmt.machinelearningservices.models.StatusMessage]
+ """
+
+ _validation = {
+ "job_type": {"required": True},
+ "status": {"readonly": True},
+ "created_date_time": {"readonly": True},
+ "progress_metrics": {"readonly": True},
+ "project_id": {"readonly": True},
+ "provisioning_state": {"readonly": True},
+ "status_messages": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "component_id": {"key": "componentId", "type": "str"},
+ "compute_id": {"key": "computeId", "type": "str"},
+ "display_name": {"key": "displayName", "type": "str"},
+ "experiment_name": {"key": "experimentName", "type": "str"},
+ "identity": {"key": "identity", "type": "IdentityConfiguration"},
+ "is_archived": {"key": "isArchived", "type": "bool"},
+ "job_type": {"key": "jobType", "type": "str"},
+ "notification_setting": {"key": "notificationSetting", "type": "NotificationSetting"},
+ "secrets_configuration": {"key": "secretsConfiguration", "type": "{SecretConfiguration}"},
+ "services": {"key": "services", "type": "{JobService}"},
+ "status": {"key": "status", "type": "str"},
+ "created_date_time": {"key": "createdDateTime", "type": "iso-8601"},
+ "data_configuration": {"key": "dataConfiguration", "type": "LabelingDataConfiguration"},
+ "job_instructions": {"key": "jobInstructions", "type": "LabelingJobInstructions"},
+ "label_categories": {"key": "labelCategories", "type": "{LabelCategory}"},
+ "labeling_job_media_properties": {"key": "labelingJobMediaProperties", "type": "LabelingJobMediaProperties"},
+ "ml_assist_configuration": {"key": "mlAssistConfiguration", "type": "MLAssistConfiguration"},
+ "progress_metrics": {"key": "progressMetrics", "type": "ProgressMetrics"},
+ "project_id": {"key": "projectId", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "status_messages": {"key": "statusMessages", "type": "[StatusMessage]"},
+ }
+
+ def __init__( # pylint: disable=too-many-locals
+ self,
+ *,
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ component_id: Optional[str] = None,
+ compute_id: Optional[str] = None,
+ display_name: Optional[str] = None,
+ experiment_name: str = "Default",
+ identity: Optional["_models.IdentityConfiguration"] = None,
+ is_archived: bool = False,
+ notification_setting: Optional["_models.NotificationSetting"] = None,
+ secrets_configuration: Optional[Dict[str, "_models.SecretConfiguration"]] = None,
+ services: Optional[Dict[str, "_models.JobService"]] = None,
+ data_configuration: Optional["_models.LabelingDataConfiguration"] = None,
+ job_instructions: Optional["_models.LabelingJobInstructions"] = None,
+ label_categories: Optional[Dict[str, "_models.LabelCategory"]] = None,
+ labeling_job_media_properties: Optional["_models.LabelingJobMediaProperties"] = None,
+ ml_assist_configuration: Optional["_models.MLAssistConfiguration"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword component_id: ARM resource ID of the component resource.
+ :paramtype component_id: str
+ :keyword compute_id: ARM resource ID of the compute resource.
+ :paramtype compute_id: str
+ :keyword display_name: Display name of job.
+ :paramtype display_name: str
+ :keyword experiment_name: The name of the experiment the job belongs to. If not set, the job is
+ placed in the "Default" experiment.
+ :paramtype experiment_name: str
+ :keyword identity: Identity configuration. If set, this should be one of AmlToken,
+ ManagedIdentity, UserIdentity or null.
+ Defaults to AmlToken if null.
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
+ :keyword is_archived: Is the asset archived?
+ :paramtype is_archived: bool
+ :keyword notification_setting: Notification setting for the job.
+ :paramtype notification_setting: ~azure.mgmt.machinelearningservices.models.NotificationSetting
+ :keyword secrets_configuration: Configuration for secrets to be made available during runtime.
+ :paramtype secrets_configuration: dict[str,
+ ~azure.mgmt.machinelearningservices.models.SecretConfiguration]
+ :keyword services: List of JobEndpoints.
+ For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
+ :paramtype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
+ :keyword data_configuration: Configuration of data used in the job.
+ :paramtype data_configuration:
+ ~azure.mgmt.machinelearningservices.models.LabelingDataConfiguration
+ :keyword job_instructions: Labeling instructions of the job.
+ :paramtype job_instructions: ~azure.mgmt.machinelearningservices.models.LabelingJobInstructions
+ :keyword label_categories: Label categories of the job.
+ :paramtype label_categories: dict[str,
+ ~azure.mgmt.machinelearningservices.models.LabelCategory]
+ :keyword labeling_job_media_properties: Media type specific properties in the job.
+ :paramtype labeling_job_media_properties:
+ ~azure.mgmt.machinelearningservices.models.LabelingJobMediaProperties
+ :keyword ml_assist_configuration: Configuration of MLAssist feature in the job.
+ :paramtype ml_assist_configuration:
+ ~azure.mgmt.machinelearningservices.models.MLAssistConfiguration
+ """
+ super().__init__(
+ description=description,
+ properties=properties,
+ tags=tags,
+ component_id=component_id,
+ compute_id=compute_id,
+ display_name=display_name,
+ experiment_name=experiment_name,
+ identity=identity,
+ is_archived=is_archived,
+ notification_setting=notification_setting,
+ secrets_configuration=secrets_configuration,
+ services=services,
+ **kwargs
+ )
+ self.job_type: str = "Labeling"
+ self.created_date_time = None
+ self.data_configuration = data_configuration
+ self.job_instructions = job_instructions
+ self.label_categories = label_categories
+ self.labeling_job_media_properties = labeling_job_media_properties
+ self.ml_assist_configuration = ml_assist_configuration
+ self.progress_metrics = None
+ self.project_id = None
+ self.provisioning_state = None
+ self.status_messages = None
+
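+# Illustrative sketch (not part of the generated code): assembling a LabelingJob
+# envelope around LabelingJobProperties for an image classification task. The
+# data asset ID and instructions URI are hypothetical placeholders.
+#
+#   labeling_job = LabelingJob(
+#       properties=LabelingJobProperties(
+#           display_name="image-labeling-sketch",
+#           data_configuration=LabelingDataConfiguration(
+#               data_id="/subscriptions/.../data/my-images/versions/1",
+#               incremental_data_refresh="Enabled",
+#           ),
+#           job_instructions=LabelingJobInstructions(uri="https://example.invalid/guide"),
+#           label_categories={"animal": LabelCategory(display_name="Animal")},
+#           labeling_job_media_properties=LabelingJobImageProperties(
+#               annotation_type="Classification"
+#           ),
+#       )
+#   )
+#   assert labeling_job.properties.job_type == "Labeling"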
+
+class LabelingJobResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of LabelingJob entities.
+
+ :ivar next_link: The link to the next page of LabelingJob objects. If null, there are no
+ additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type LabelingJob.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.LabelingJob]
+ """
+
+ _attribute_map = {
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[LabelingJob]"},
+ }
+
+ def __init__(
+ self, *, next_link: Optional[str] = None, value: Optional[List["_models.LabelingJob"]] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword next_link: The link to the next page of LabelingJob objects. If null, there are no
+ additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type LabelingJob.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.LabelingJob]
+ """
+ super().__init__(**kwargs)
+ self.next_link = next_link
+ self.value = value
+
+
+class LabelingJobTextProperties(LabelingJobMediaProperties):
+ """Properties of a labeling job for text data.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar media_type: [Required] Media type of the job. Required. Known values are: "Image" and
+ "Text".
+ :vartype media_type: str or ~azure.mgmt.machinelearningservices.models.MediaType
+ :ivar annotation_type: Annotation type of text labeling job. Known values are: "Classification"
+ and "NamedEntityRecognition".
+ :vartype annotation_type: str or ~azure.mgmt.machinelearningservices.models.TextAnnotationType
+ """
+
+ _validation = {
+ "media_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "media_type": {"key": "mediaType", "type": "str"},
+ "annotation_type": {"key": "annotationType", "type": "str"},
+ }
+
+ def __init__(
+ self, *, annotation_type: Optional[Union[str, "_models.TextAnnotationType"]] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword annotation_type: Annotation type of text labeling job. Known values are:
+ "Classification" and "NamedEntityRecognition".
+ :paramtype annotation_type: str or
+ ~azure.mgmt.machinelearningservices.models.TextAnnotationType
+ """
+ super().__init__(**kwargs)
+ self.media_type: str = "Text"
+ self.annotation_type = annotation_type
+
+
+class OneLakeArtifact(_serialization.Model):
+ """OneLake artifact (data source) configuration.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ LakeHouseArtifact
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar artifact_name: [Required] OneLake artifact name. Required.
+ :vartype artifact_name: str
+ :ivar artifact_type: [Required] OneLake artifact type. Required. "LakeHouse"
+ :vartype artifact_type: str or ~azure.mgmt.machinelearningservices.models.OneLakeArtifactType
+ """
+
+ _validation = {
+ "artifact_name": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "artifact_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "artifact_name": {"key": "artifactName", "type": "str"},
+ "artifact_type": {"key": "artifactType", "type": "str"},
+ }
+
+ _subtype_map = {"artifact_type": {"LakeHouse": "LakeHouseArtifact"}}
+
+ def __init__(self, *, artifact_name: str, **kwargs: Any) -> None:
+ """
+ :keyword artifact_name: [Required] OneLake artifact name. Required.
+ :paramtype artifact_name: str
+ """
+ super().__init__(**kwargs)
+ self.artifact_name = artifact_name
+ self.artifact_type: Optional[str] = None
+
+
+class LakeHouseArtifact(OneLakeArtifact):
+ """LakeHouseArtifact.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar artifact_name: [Required] OneLake artifact name. Required.
+ :vartype artifact_name: str
+ :ivar artifact_type: [Required] OneLake artifact type. Required. "LakeHouse"
+ :vartype artifact_type: str or ~azure.mgmt.machinelearningservices.models.OneLakeArtifactType
+ """
+
+ _validation = {
+ "artifact_name": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "artifact_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "artifact_name": {"key": "artifactName", "type": "str"},
+ "artifact_type": {"key": "artifactType", "type": "str"},
+ }
+
+ def __init__(self, *, artifact_name: str, **kwargs: Any) -> None:
+ """
+ :keyword artifact_name: [Required] OneLake artifact name. Required.
+ :paramtype artifact_name: str
+ """
+ super().__init__(artifact_name=artifact_name, **kwargs)
+ self.artifact_type: str = "LakeHouse"
+
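+# Illustrative sketch (not part of the generated code): OneLakeArtifact is a
+# polymorphic base keyed on "artifactType", and LakeHouseArtifact pins that
+# discriminator to "LakeHouse". The artifact name is a hypothetical value.
+#
+#   artifact = LakeHouseArtifact(artifact_name="my_lakehouse")
+#   assert artifact.artifact_type == "LakeHouse"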
+
+class ListAmlUserFeatureResult(_serialization.Model):
+ """The List Aml user feature operation response.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar value: The list of AML user facing features.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.AmlUserFeature]
+ :ivar next_link: The URI to fetch the next page of AML user features information. Call
+ ListNext() with this to fetch the next page of AML user features information.
+ :vartype next_link: str
+ """
+
+ _validation = {
+ "value": {"readonly": True},
+ "next_link": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "value": {"key": "value", "type": "[AmlUserFeature]"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.value = None
+ self.next_link = None
+
+
+class ListNotebookKeysResult(_serialization.Model):
+ """ListNotebookKeysResult.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar primary_access_key: The primary access key of the Notebook.
+ :vartype primary_access_key: str
+ :ivar secondary_access_key: The secondary access key of the Notebook.
+ :vartype secondary_access_key: str
+ """
+
+ _validation = {
+ "primary_access_key": {"readonly": True},
+ "secondary_access_key": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "primary_access_key": {"key": "primaryAccessKey", "type": "str"},
+ "secondary_access_key": {"key": "secondaryAccessKey", "type": "str"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.primary_access_key = None
+ self.secondary_access_key = None
+
+
+class ListStorageAccountKeysResult(_serialization.Model):
+ """ListStorageAccountKeysResult.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar user_storage_key: The access key of the storage.
+ :vartype user_storage_key: str
+ """
+
+ _validation = {
+ "user_storage_key": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "user_storage_key": {"key": "userStorageKey", "type": "str"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.user_storage_key = None
+
+
+class ListUsagesResult(_serialization.Model):
+ """The List Usages operation response.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar value: The list of AML resource usages.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.Usage]
+ :ivar next_link: The URI to fetch the next page of AML resource usage information. Call
+ ListNext() with this to fetch the next page of AML resource usage information.
+ :vartype next_link: str
+ """
+
+ _validation = {
+ "value": {"readonly": True},
+ "next_link": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "value": {"key": "value", "type": "[Usage]"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.value = None
+ self.next_link = None
+
+
+class ListWorkspaceKeysResult(_serialization.Model):
+ """ListWorkspaceKeysResult.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar app_insights_instrumentation_key: The access key of the workspace app insights.
+ :vartype app_insights_instrumentation_key: str
+ :ivar container_registry_credentials:
+ :vartype container_registry_credentials:
+ ~azure.mgmt.machinelearningservices.models.RegistryListCredentialsResult
+ :ivar notebook_access_keys:
+ :vartype notebook_access_keys:
+ ~azure.mgmt.machinelearningservices.models.ListNotebookKeysResult
+ :ivar user_storage_arm_id: The ARM Id of the workspace storage.
+ :vartype user_storage_arm_id: str
+ :ivar user_storage_key: The access key of the workspace storage.
+ :vartype user_storage_key: str
+ """
+
+ _validation = {
+ "app_insights_instrumentation_key": {"readonly": True},
+ "user_storage_arm_id": {"readonly": True},
+ "user_storage_key": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "app_insights_instrumentation_key": {"key": "appInsightsInstrumentationKey", "type": "str"},
+ "container_registry_credentials": {
+ "key": "containerRegistryCredentials",
+ "type": "RegistryListCredentialsResult",
+ },
+ "notebook_access_keys": {"key": "notebookAccessKeys", "type": "ListNotebookKeysResult"},
+ "user_storage_arm_id": {"key": "userStorageArmId", "type": "str"},
+ "user_storage_key": {"key": "userStorageKey", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ container_registry_credentials: Optional["_models.RegistryListCredentialsResult"] = None,
+ notebook_access_keys: Optional["_models.ListNotebookKeysResult"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword container_registry_credentials:
+ :paramtype container_registry_credentials:
+ ~azure.mgmt.machinelearningservices.models.RegistryListCredentialsResult
+ :keyword notebook_access_keys:
+ :paramtype notebook_access_keys:
+ ~azure.mgmt.machinelearningservices.models.ListNotebookKeysResult
+ """
+ super().__init__(**kwargs)
+ self.app_insights_instrumentation_key = None
+ self.container_registry_credentials = container_registry_credentials
+ self.notebook_access_keys = notebook_access_keys
+ self.user_storage_arm_id = None
+ self.user_storage_key = None
+
+
+class ListWorkspaceQuotas(_serialization.Model):
+ """The List WorkspaceQuotasByVMFamily operation response.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar value: The list of Workspace Quotas by VM Family.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.ResourceQuota]
+ :ivar next_link: The URI to fetch the next page of workspace quota information by VM Family.
+ Call ListNext() with this to fetch the next page of Workspace Quota information.
+ :vartype next_link: str
+ """
+
+ _validation = {
+ "value": {"readonly": True},
+ "next_link": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "value": {"key": "value", "type": "[ResourceQuota]"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.value = None
+ self.next_link = None
+
+
+class LiteralJobInput(JobInput):
+ """Literal input type.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar description: Description for the input.
+ :vartype description: str
+ :ivar job_input_type: [Required] Specifies the type of job. Required. Known values are:
+ "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
+ "triton_model".
+ :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
+ :ivar value: [Required] Literal value for the input. Required.
+ :vartype value: str
+ """
+
+ _validation = {
+ "job_input_type": {"required": True},
+ "value": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
+ _attribute_map = {
+ "description": {"key": "description", "type": "str"},
+ "job_input_type": {"key": "jobInputType", "type": "str"},
+ "value": {"key": "value", "type": "str"},
+ }
+
+ def __init__(self, *, value: str, description: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword description: Description for the input.
+ :paramtype description: str
+ :keyword value: [Required] Literal value for the input. Required.
+ :paramtype value: str
+ """
+ super().__init__(description=description, **kwargs)
+ self.job_input_type: str = "literal"
+ self.value = value
+
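+# Illustrative sketch (not part of the generated code): a literal job input
+# whose value satisfies the validation above (non-empty string).
+#
+#   epochs_input = LiteralJobInput(value="10", description="Number of epochs")
+#   assert epochs_input.job_input_type == "literal"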
+
+class ManagedComputeIdentity(MonitorComputeIdentityBase):
+ """Managed compute identity definition.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar compute_identity_type: [Required] Monitor compute identity type enum. Required. Known
+ values are: "AmlToken" and "ManagedIdentity".
+ :vartype compute_identity_type: str or
+ ~azure.mgmt.machinelearningservices.models.MonitorComputeIdentityType
+ :ivar identity: Managed service identity (system assigned and/or user assigned identities).
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ """
+
+ _validation = {
+ "compute_identity_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "compute_identity_type": {"key": "computeIdentityType", "type": "str"},
+ "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
+ }
+
+ def __init__(self, *, identity: Optional["_models.ManagedServiceIdentity"] = None, **kwargs: Any) -> None:
+ """
+ :keyword identity: Managed service identity (system assigned and/or user assigned identities).
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ """
+ super().__init__(**kwargs)
+ self.compute_identity_type: str = "ManagedIdentity"
+ self.identity = identity
+
+
+class ManagedIdentity(IdentityConfiguration):
+ """Managed identity configuration.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar identity_type: [Required] Specifies the type of identity framework. Required. Known
+ values are: "Managed", "AMLToken", and "UserIdentity".
+ :vartype identity_type: str or
+ ~azure.mgmt.machinelearningservices.models.IdentityConfigurationType
+ :ivar client_id: Specifies a user-assigned identity by client ID. For system-assigned, do not
+ set this field.
+ :vartype client_id: str
+ :ivar object_id: Specifies a user-assigned identity by object ID. For system-assigned, do not
+ set this field.
+ :vartype object_id: str
+ :ivar resource_id: Specifies a user-assigned identity by ARM resource ID. For system-assigned,
+ do not set this field.
+ :vartype resource_id: str
+ """
+
+ _validation = {
+ "identity_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "identity_type": {"key": "identityType", "type": "str"},
+ "client_id": {"key": "clientId", "type": "str"},
+ "object_id": {"key": "objectId", "type": "str"},
+ "resource_id": {"key": "resourceId", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ client_id: Optional[str] = None,
+ object_id: Optional[str] = None,
+ resource_id: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword client_id: Specifies a user-assigned identity by client ID. For system-assigned, do
+ not set this field.
+ :paramtype client_id: str
+ :keyword object_id: Specifies a user-assigned identity by object ID. For system-assigned, do
+ not set this field.
+ :paramtype object_id: str
+ :keyword resource_id: Specifies a user-assigned identity by ARM resource ID. For
+ system-assigned, do not set this field.
+ :paramtype resource_id: str
+ """
+ super().__init__(**kwargs)
+ self.identity_type: str = "Managed"
+ self.client_id = client_id
+ self.object_id = object_id
+ self.resource_id = resource_id
+
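+# Illustrative sketch (not part of the generated code): identity configuration
+# for a job. Leaving client_id, object_id, and resource_id unset selects the
+# system-assigned identity; the client ID below is a placeholder GUID.
+#
+#   system_assigned = ManagedIdentity()
+#   user_assigned = ManagedIdentity(client_id="00000000-0000-0000-0000-000000000000")
+#   assert user_assigned.identity_type == "Managed"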
+
+class ManagedIdentityAuthTypeWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2):
+ """ManagedIdentityAuthTypeWorkspaceConnectionProperties.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar auth_type: Authentication type of the connection target. Required. Known values are:
+ "PAT", "ManagedIdentity", "UsernamePassword", "None", "SAS", "ServicePrincipal", "AccessKey",
+ "ApiKey", and "CustomKeys".
+ :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
+ :ivar category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI",
+ "CognitiveSearch", "CognitiveService", and "CustomKeys".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :ivar created_by_workspace_arm_id: The arm id of the workspace which created this connection.
+ :vartype created_by_workspace_arm_id: str
+ :ivar expiry_time:
+ :vartype expiry_time: ~datetime.datetime
+ :ivar is_shared_to_all: Whether this connection will be shared with all the project
+ workspaces under the hub.
+ :vartype is_shared_to_all: bool
+ :ivar metadata: Any object.
+ :vartype metadata: JSON
+ :ivar target:
+ :vartype target: str
+ :ivar credentials:
+ :vartype credentials:
+ ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionManagedIdentity
+ """
+
+ _validation = {
+ "auth_type": {"required": True},
+ "created_by_workspace_arm_id": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "auth_type": {"key": "authType", "type": "str"},
+ "category": {"key": "category", "type": "str"},
+ "created_by_workspace_arm_id": {"key": "createdByWorkspaceArmId", "type": "str"},
+ "expiry_time": {"key": "expiryTime", "type": "iso-8601"},
+ "is_shared_to_all": {"key": "isSharedToAll", "type": "bool"},
+ "metadata": {"key": "metadata", "type": "object"},
+ "target": {"key": "target", "type": "str"},
+ "credentials": {"key": "credentials", "type": "WorkspaceConnectionManagedIdentity"},
+ }
+
+ def __init__(
+ self,
+ *,
+ category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
+ expiry_time: Optional[datetime.datetime] = None,
+ is_shared_to_all: Optional[bool] = None,
+ metadata: Optional[JSON] = None,
+ target: Optional[str] = None,
+ credentials: Optional["_models.WorkspaceConnectionManagedIdentity"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI",
+ "CognitiveSearch", "CognitiveService", and "CustomKeys".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :keyword expiry_time:
+ :paramtype expiry_time: ~datetime.datetime
+ :keyword is_shared_to_all: Whether this connection will be shared with all the project
+ workspaces under the hub.
+ :paramtype is_shared_to_all: bool
+ :keyword metadata: Any object.
+ :paramtype metadata: JSON
+ :keyword target:
+ :paramtype target: str
+ :keyword credentials:
+ :paramtype credentials:
+ ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionManagedIdentity
+ """
+ super().__init__(
+ category=category,
+ expiry_time=expiry_time,
+ is_shared_to_all=is_shared_to_all,
+ metadata=metadata,
+ target=target,
+ **kwargs
+ )
+ self.auth_type: str = "ManagedIdentity"
+ self.credentials = credentials
+
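+# Illustrative sketch (not part of the generated code): workspace connection
+# properties that authenticate with a managed identity. The category and target
+# are hypothetical; "credentials" is left unset because the
+# WorkspaceConnectionManagedIdentity model is defined elsewhere in this module.
+#
+#   connection_props = ManagedIdentityAuthTypeWorkspaceConnectionProperties(
+#       category="ContainerRegistry",
+#       target="https://example.azurecr.io",
+#       is_shared_to_all=False,
+#   )
+#   assert connection_props.auth_type == "ManagedIdentity"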
+
+class ManagedNetworkProvisionOptions(_serialization.Model):
+ """Managed Network Provisioning options for managed network of a machine learning workspace.
+
+ :ivar include_spark:
+ :vartype include_spark: bool
+ """
+
+ _attribute_map = {
+ "include_spark": {"key": "includeSpark", "type": "bool"},
+ }
+
+ def __init__(self, *, include_spark: Optional[bool] = None, **kwargs: Any) -> None:
+ """
+ :keyword include_spark:
+ :paramtype include_spark: bool
+ """
+ super().__init__(**kwargs)
+ self.include_spark = include_spark
+
+
+class ManagedNetworkProvisionStatus(_serialization.Model):
+ """Status of the Provisioning for the managed network of a machine learning workspace.
+
+ :ivar spark_ready:
+ :vartype spark_ready: bool
+ :ivar status: Status for the managed network of a machine learning workspace. Known values are:
+ "Inactive" and "Active".
+ :vartype status: str or ~azure.mgmt.machinelearningservices.models.ManagedNetworkStatus
+ """
+
+ _attribute_map = {
+ "spark_ready": {"key": "sparkReady", "type": "bool"},
+ "status": {"key": "status", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ spark_ready: Optional[bool] = None,
+ status: Optional[Union[str, "_models.ManagedNetworkStatus"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword spark_ready:
+ :paramtype spark_ready: bool
+ :keyword status: Status for the managed network of a machine learning workspace. Known values
+ are: "Inactive" and "Active".
+ :paramtype status: str or ~azure.mgmt.machinelearningservices.models.ManagedNetworkStatus
+ """
+ super().__init__(**kwargs)
+ self.spark_ready = spark_ready
+ self.status = status
+
+
+class ManagedNetworkSettings(_serialization.Model):
+ """Managed Network settings for a machine learning workspace.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar isolation_mode: Isolation mode for the managed network of a machine learning workspace.
+ Known values are: "Disabled", "AllowInternetOutbound", and "AllowOnlyApprovedOutbound".
+ :vartype isolation_mode: str or ~azure.mgmt.machinelearningservices.models.IsolationMode
+ :ivar network_id:
+ :vartype network_id: str
+ :ivar outbound_rules: Dictionary of outbound rules.
+ :vartype outbound_rules: dict[str, ~azure.mgmt.machinelearningservices.models.OutboundRule]
+ :ivar status: Status of the Provisioning for the managed network of a machine learning
+ workspace.
+ :vartype status: ~azure.mgmt.machinelearningservices.models.ManagedNetworkProvisionStatus
+ :ivar changeable_isolation_modes: Detail isolation modes for the managed network of a machine
+ learning workspace.
+ :vartype changeable_isolation_modes: list[str or
+ ~azure.mgmt.machinelearningservices.models.IsolationMode]
+ """
+
+ _validation = {
+ "network_id": {"readonly": True},
+ "changeable_isolation_modes": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "isolation_mode": {"key": "isolationMode", "type": "str"},
+ "network_id": {"key": "networkId", "type": "str"},
+ "outbound_rules": {"key": "outboundRules", "type": "{OutboundRule}"},
+ "status": {"key": "status", "type": "ManagedNetworkProvisionStatus"},
+ "changeable_isolation_modes": {"key": "changeableIsolationModes", "type": "[str]"},
+ }
+
+ def __init__(
+ self,
+ *,
+ isolation_mode: Optional[Union[str, "_models.IsolationMode"]] = None,
+ outbound_rules: Optional[Dict[str, "_models.OutboundRule"]] = None,
+ status: Optional["_models.ManagedNetworkProvisionStatus"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword isolation_mode: Isolation mode for the managed network of a machine learning
+ workspace. Known values are: "Disabled", "AllowInternetOutbound", and
+ "AllowOnlyApprovedOutbound".
+ :paramtype isolation_mode: str or ~azure.mgmt.machinelearningservices.models.IsolationMode
+ :keyword outbound_rules: Dictionary of outbound rules.
+ :paramtype outbound_rules: dict[str, ~azure.mgmt.machinelearningservices.models.OutboundRule]
+ :keyword status: Status of the Provisioning for the managed network of a machine learning
+ workspace.
+ :paramtype status: ~azure.mgmt.machinelearningservices.models.ManagedNetworkProvisionStatus
+ """
+ super().__init__(**kwargs)
+ self.isolation_mode = isolation_mode
+ self.network_id = None
+ self.outbound_rules = outbound_rules
+ self.status = status
+ self.changeable_isolation_modes = None
+
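+# Illustrative sketch (not part of the generated code): requesting an isolation
+# mode for a workspace managed network. Read-only fields (network_id,
+# changeable_isolation_modes) are populated by the service, not by the caller.
+#
+#   network_settings = ManagedNetworkSettings(isolation_mode="AllowInternetOutbound")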
+
+class ManagedOnlineDeployment(OnlineDeploymentProperties): # pylint: disable=too-many-instance-attributes
+ """Properties specific to a ManagedOnlineDeployment.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar code_configuration: Code configuration for the endpoint deployment.
+ :vartype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
+ :ivar description: Description of the endpoint deployment.
+ :vartype description: str
+ :ivar environment_id: ARM resource ID of the environment specification for the endpoint
+ deployment.
+ :vartype environment_id: str
+ :ivar environment_variables: Environment variables configuration for the deployment.
+ :vartype environment_variables: dict[str, str]
+ :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
+ :vartype properties: dict[str, str]
+ :ivar app_insights_enabled: If true, enables Application Insights logging.
+ :vartype app_insights_enabled: bool
+ :ivar data_collector: The mdc configuration; mdc is disabled when it is null.
+ :vartype data_collector: ~azure.mgmt.machinelearningservices.models.DataCollector
+ :ivar egress_public_network_access: If Enabled, allow egress public network access. If
+ Disabled, this will create secure egress. Default: Enabled. Known values are: "Enabled" and
+ "Disabled".
+ :vartype egress_public_network_access: str or
+ ~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
+ :ivar endpoint_compute_type: [Required] The compute type of the endpoint. Required. Known
+ values are: "Managed", "Kubernetes", and "AzureMLCompute".
+ :vartype endpoint_compute_type: str or
+ ~azure.mgmt.machinelearningservices.models.EndpointComputeType
+ :ivar instance_type: Compute instance type.
+ :vartype instance_type: str
+ :ivar liveness_probe: Liveness probe monitors the health of the container regularly.
+ :vartype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
+ :ivar model: The URI path to the model.
+ :vartype model: str
+ :ivar model_mount_path: The path to mount the model in a custom container.
+ :vartype model_mount_path: str
+ :ivar provisioning_state: Provisioning state for the endpoint deployment. Known values are:
+ "Creating", "Deleting", "Scaling", "Updating", "Succeeded", "Failed", and "Canceled".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.DeploymentProvisioningState
+ :ivar readiness_probe: Readiness probe validates if the container is ready to serve traffic.
+ The properties and defaults are the same as liveness probe.
+ :vartype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
+ :ivar request_settings: Request settings for the deployment.
+ :vartype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
+ :ivar scale_settings: Scale settings for the deployment.
+ If it is null or not provided,
+ it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
+ and to DefaultScaleSettings for ManagedOnlineDeployment.
+ :vartype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
+ """
+
+ _validation = {
+ "endpoint_compute_type": {"required": True},
+ "provisioning_state": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "code_configuration": {"key": "codeConfiguration", "type": "CodeConfiguration"},
+ "description": {"key": "description", "type": "str"},
+ "environment_id": {"key": "environmentId", "type": "str"},
+ "environment_variables": {"key": "environmentVariables", "type": "{str}"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "app_insights_enabled": {"key": "appInsightsEnabled", "type": "bool"},
+ "data_collector": {"key": "dataCollector", "type": "DataCollector"},
+ "egress_public_network_access": {"key": "egressPublicNetworkAccess", "type": "str"},
+ "endpoint_compute_type": {"key": "endpointComputeType", "type": "str"},
+ "instance_type": {"key": "instanceType", "type": "str"},
+ "liveness_probe": {"key": "livenessProbe", "type": "ProbeSettings"},
+ "model": {"key": "model", "type": "str"},
+ "model_mount_path": {"key": "modelMountPath", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "readiness_probe": {"key": "readinessProbe", "type": "ProbeSettings"},
+ "request_settings": {"key": "requestSettings", "type": "OnlineRequestSettings"},
+ "scale_settings": {"key": "scaleSettings", "type": "OnlineScaleSettings"},
+ }
+
+ def __init__(
+ self,
+ *,
+ code_configuration: Optional["_models.CodeConfiguration"] = None,
+ description: Optional[str] = None,
+ environment_id: Optional[str] = None,
+ environment_variables: Optional[Dict[str, str]] = None,
+ properties: Optional[Dict[str, str]] = None,
+ app_insights_enabled: bool = False,
+ data_collector: Optional["_models.DataCollector"] = None,
+ egress_public_network_access: Optional[Union[str, "_models.EgressPublicNetworkAccessType"]] = None,
+ instance_type: Optional[str] = None,
+ liveness_probe: Optional["_models.ProbeSettings"] = None,
+ model: Optional[str] = None,
+ model_mount_path: Optional[str] = None,
+ readiness_probe: Optional["_models.ProbeSettings"] = None,
+ request_settings: Optional["_models.OnlineRequestSettings"] = None,
+ scale_settings: Optional["_models.OnlineScaleSettings"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword code_configuration: Code configuration for the endpoint deployment.
+ :paramtype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
+ :keyword description: Description of the endpoint deployment.
+ :paramtype description: str
+ :keyword environment_id: ARM resource ID of the environment specification for the endpoint
+ deployment.
+ :paramtype environment_id: str
+ :keyword environment_variables: Environment variables configuration for the deployment.
+ :paramtype environment_variables: dict[str, str]
+ :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
+ :paramtype properties: dict[str, str]
+ :keyword app_insights_enabled: If true, enables Application Insights logging.
+ :paramtype app_insights_enabled: bool
+ :keyword data_collector: The mdc configuration; mdc is disabled when it is null.
+ :paramtype data_collector: ~azure.mgmt.machinelearningservices.models.DataCollector
+ :keyword egress_public_network_access: If Enabled, allow egress public network access. If
+ Disabled, this will create secure egress. Default: Enabled. Known values are: "Enabled" and
+ "Disabled".
+ :paramtype egress_public_network_access: str or
+ ~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
+ :keyword instance_type: Compute instance type.
+ :paramtype instance_type: str
+ :keyword liveness_probe: Liveness probe monitors the health of the container regularly.
+ :paramtype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
+ :keyword model: The URI path to the model.
+ :paramtype model: str
+ :keyword model_mount_path: The path to mount the model in a custom container.
+ :paramtype model_mount_path: str
+ :keyword readiness_probe: Readiness probe validates if the container is ready to serve traffic.
+ The properties and defaults are the same as liveness probe.
+ :paramtype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
+ :keyword request_settings: Request settings for the deployment.
+ :paramtype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
+ :keyword scale_settings: Scale settings for the deployment.
+ If it is null or not provided,
+ it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
+ and to DefaultScaleSettings for ManagedOnlineDeployment.
+ :paramtype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
+ """
+ super().__init__(
+ code_configuration=code_configuration,
+ description=description,
+ environment_id=environment_id,
+ environment_variables=environment_variables,
+ properties=properties,
+ app_insights_enabled=app_insights_enabled,
+ data_collector=data_collector,
+ egress_public_network_access=egress_public_network_access,
+ instance_type=instance_type,
+ liveness_probe=liveness_probe,
+ model=model,
+ model_mount_path=model_mount_path,
+ readiness_probe=readiness_probe,
+ request_settings=request_settings,
+ scale_settings=scale_settings,
+ **kwargs
+ )
+ self.endpoint_compute_type: str = "Managed"
+
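+# Illustrative sketch (not part of the generated code): a managed online
+# deployment payload. The model URI and VM size are hypothetical values;
+# scale_settings is left unset so the service applies DefaultScaleSettings, as
+# described above.
+#
+#   managed_deployment = ManagedOnlineDeployment(
+#       model="azureml://registries/example/models/example-model/versions/1",
+#       instance_type="Standard_DS3_v2",
+#       egress_public_network_access="Enabled",
+#   )
+#   assert managed_deployment.endpoint_compute_type == "Managed"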
+
+class ManagedServiceIdentity(_serialization.Model):
+ """Managed service identity (system assigned and/or user assigned identities).
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar principal_id: The service principal ID of the system assigned identity. This property
+ will only be provided for a system assigned identity.
+ :vartype principal_id: str
+ :ivar tenant_id: The tenant ID of the system assigned identity. This property will only be
+ provided for a system assigned identity.
+ :vartype tenant_id: str
+ :ivar type: Type of managed service identity (where both SystemAssigned and UserAssigned types
+ are allowed). Required. Known values are: "None", "SystemAssigned", "UserAssigned", and
+ "SystemAssigned,UserAssigned".
+ :vartype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
+ :ivar user_assigned_identities: The set of user assigned identities associated with the
+ resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
+ '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}.
+ The dictionary values can be empty objects ({}) in requests.
+ :vartype user_assigned_identities: dict[str,
+ ~azure.mgmt.machinelearningservices.models.UserAssignedIdentity]
+ """
+
+ _validation = {
+ "principal_id": {"readonly": True},
+ "tenant_id": {"readonly": True},
+ "type": {"required": True},
+ }
+
+ _attribute_map = {
+ "principal_id": {"key": "principalId", "type": "str"},
+ "tenant_id": {"key": "tenantId", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "user_assigned_identities": {"key": "userAssignedIdentities", "type": "{UserAssignedIdentity}"},
+ }
+
+ def __init__(
+ self,
+ *,
+ type: Union[str, "_models.ManagedServiceIdentityType"],
+ user_assigned_identities: Optional[Dict[str, "_models.UserAssignedIdentity"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword type: Type of managed service identity (where both SystemAssigned and UserAssigned
+ types are allowed). Required. Known values are: "None", "SystemAssigned", "UserAssigned", and
+ "SystemAssigned,UserAssigned".
+ :paramtype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
+ :keyword user_assigned_identities: The set of user assigned identities associated with the
+ resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
+ '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}.
+ The dictionary values can be empty objects ({}) in requests.
+ :paramtype user_assigned_identities: dict[str,
+ ~azure.mgmt.machinelearningservices.models.UserAssignedIdentity]
+ """
+ super().__init__(**kwargs)
+ self.principal_id = None
+ self.tenant_id = None
+ self.type = type
+ self.user_assigned_identities = user_assigned_identities
+
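+# Illustrative sketch (not part of the generated code): a system-assigned
+# managed service identity. For user-assigned identities, the dictionary keys
+# would be ARM resource IDs of userAssignedIdentities resources; the
+# UserAssignedIdentity model is defined elsewhere in this module.
+#
+#   msi = ManagedServiceIdentity(type="SystemAssigned")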
+
+class MaterializationComputeResource(_serialization.Model):
+ """Dto object representing compute resource.
+
+ :ivar instance_type: Specifies the instance type.
+ :vartype instance_type: str
+ """
+
+ _attribute_map = {
+ "instance_type": {"key": "instanceType", "type": "str"},
+ }
+
+ def __init__(self, *, instance_type: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword instance_type: Specifies the instance type.
+ :paramtype instance_type: str
+ """
+ super().__init__(**kwargs)
+ self.instance_type = instance_type
+
+
+class MaterializationSettings(_serialization.Model):
+ """MaterializationSettings.
+
+ :ivar notification: Specifies the notification details.
+ :vartype notification: ~azure.mgmt.machinelearningservices.models.NotificationSetting
+ :ivar resource: Specifies the compute resource settings.
+ :vartype resource: ~azure.mgmt.machinelearningservices.models.MaterializationComputeResource
+ :ivar schedule: Specifies the schedule details.
+ :vartype schedule: ~azure.mgmt.machinelearningservices.models.RecurrenceTrigger
+ :ivar spark_configuration: Specifies the Spark compute settings.
+ :vartype spark_configuration: dict[str, str]
+ :ivar store_type: Specifies the stores to which materialization should happen. Known values
+ are: "None", "Online", "Offline", and "OnlineAndOffline".
+ :vartype store_type: str or ~azure.mgmt.machinelearningservices.models.MaterializationStoreType
+ """
+
+ _attribute_map = {
+ "notification": {"key": "notification", "type": "NotificationSetting"},
+ "resource": {"key": "resource", "type": "MaterializationComputeResource"},
+ "schedule": {"key": "schedule", "type": "RecurrenceTrigger"},
+ "spark_configuration": {"key": "sparkConfiguration", "type": "{str}"},
+ "store_type": {"key": "storeType", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ notification: Optional["_models.NotificationSetting"] = None,
+ resource: Optional["_models.MaterializationComputeResource"] = None,
+ schedule: Optional["_models.RecurrenceTrigger"] = None,
+ spark_configuration: Optional[Dict[str, str]] = None,
+ store_type: Optional[Union[str, "_models.MaterializationStoreType"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword notification: Specifies the notification details.
+ :paramtype notification: ~azure.mgmt.machinelearningservices.models.NotificationSetting
+ :keyword resource: Specifies the compute resource settings.
+ :paramtype resource: ~azure.mgmt.machinelearningservices.models.MaterializationComputeResource
+ :keyword schedule: Specifies the schedule details.
+ :paramtype schedule: ~azure.mgmt.machinelearningservices.models.RecurrenceTrigger
+ :keyword spark_configuration: Specifies the Spark compute settings.
+ :paramtype spark_configuration: dict[str, str]
+ :keyword store_type: Specifies the stores to which materialization should happen. Known values
+ are: "None", "Online", "Offline", and "OnlineAndOffline".
+ :paramtype store_type: str or
+ ~azure.mgmt.machinelearningservices.models.MaterializationStoreType
+ """
+ super().__init__(**kwargs)
+ self.notification = notification
+ self.resource = resource
+ self.schedule = schedule
+ self.spark_configuration = spark_configuration
+ self.store_type = store_type
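+
+# --- Editor's note: illustrative usage sketch, not part of the generated client. ---
+# One hedged way to combine the two materialization models above. The instance
+# type and Spark option are placeholders, and RecurrenceTrigger taking
+# `frequency`/`interval` keywords is an assumption about that model.
+#
+#   from azure.mgmt.machinelearningservices import models
+#
+#   settings = models.MaterializationSettings(
+#       resource=models.MaterializationComputeResource(instance_type="standard_e4s_v3"),
+#       schedule=models.RecurrenceTrigger(frequency="Day", interval=1),
+#       spark_configuration={"spark.driver.cores": "2"},
+#       store_type="OnlineAndOffline",
+#   )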
+
+
+class MedianStoppingPolicy(EarlyTerminationPolicy):
+ """Defines an early termination policy based on running averages of the primary metric of all
+ runs.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar delay_evaluation: Number of intervals by which to delay the first evaluation.
+ :vartype delay_evaluation: int
+ :ivar evaluation_interval: Interval (number of runs) between policy evaluations.
+ :vartype evaluation_interval: int
+ :ivar policy_type: [Required] Name of policy configuration. Required. Known values are:
+ "Bandit", "MedianStopping", and "TruncationSelection".
+ :vartype policy_type: str or
+ ~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicyType
+ """
+
+ _validation = {
+ "policy_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "delay_evaluation": {"key": "delayEvaluation", "type": "int"},
+ "evaluation_interval": {"key": "evaluationInterval", "type": "int"},
+ "policy_type": {"key": "policyType", "type": "str"},
+ }
+
+ def __init__(self, *, delay_evaluation: int = 0, evaluation_interval: int = 0, **kwargs: Any) -> None:
"""
- super().__init__(
- ams_gradient=ams_gradient,
- augmentations=augmentations,
- beta1=beta1,
- beta2=beta2,
- distributed=distributed,
- early_stopping=early_stopping,
- early_stopping_delay=early_stopping_delay,
- early_stopping_patience=early_stopping_patience,
- enable_onnx_normalization=enable_onnx_normalization,
- evaluation_frequency=evaluation_frequency,
- gradient_accumulation_step=gradient_accumulation_step,
- layers_to_freeze=layers_to_freeze,
- learning_rate=learning_rate,
- learning_rate_scheduler=learning_rate_scheduler,
- model_name=model_name,
- momentum=momentum,
- nesterov=nesterov,
- number_of_epochs=number_of_epochs,
- number_of_workers=number_of_workers,
- optimizer=optimizer,
- random_seed=random_seed,
- step_lr_gamma=step_lr_gamma,
- step_lr_step_size=step_lr_step_size,
- training_batch_size=training_batch_size,
- validation_batch_size=validation_batch_size,
- warmup_cosine_lr_cycles=warmup_cosine_lr_cycles,
- warmup_cosine_lr_warmup_epochs=warmup_cosine_lr_warmup_epochs,
- weight_decay=weight_decay,
- **kwargs
- )
- self.box_detections_per_image = box_detections_per_image
- self.box_score_threshold = box_score_threshold
- self.image_size = image_size
- self.max_size = max_size
- self.min_size = min_size
- self.model_size = model_size
- self.multi_scale = multi_scale
- self.nms_iou_threshold = nms_iou_threshold
- self.tile_grid_size = tile_grid_size
- self.tile_overlap_ratio = tile_overlap_ratio
- self.tile_predictions_nms_threshold = tile_predictions_nms_threshold
- self.validation_iou_threshold = validation_iou_threshold
- self.validation_metric_type = validation_metric_type
+ :keyword delay_evaluation: Number of intervals by which to delay the first evaluation.
+ :paramtype delay_evaluation: int
+ :keyword evaluation_interval: Interval (number of runs) between policy evaluations.
+ :paramtype evaluation_interval: int
+ """
+ super().__init__(delay_evaluation=delay_evaluation, evaluation_interval=evaluation_interval, **kwargs)
+ self.policy_type: str = "MedianStopping"
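+
+# --- Editor's note: illustrative usage sketch, not part of the generated client. ---
+# The discriminator is fixed by the constructor, so callers only pass the two
+# optional tuning knobs; the values here are arbitrary examples.
+#
+#   from azure.mgmt.machinelearningservices import models
+#
+#   policy = models.MedianStoppingPolicy(delay_evaluation=5, evaluation_interval=1)
+#   assert policy.policy_type == "MedianStopping"
+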
-class ImageModelSettings(_serialization.Model): # pylint: disable=too-many-instance-attributes
- """Settings used for training the model.
- For more information on the available settings please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+class MLAssistConfiguration(_serialization.Model):
+ """Labeling MLAssist configuration definition.
- :ivar advanced_settings: Settings for advanced scenarios.
- :vartype advanced_settings: str
- :ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
- :vartype ams_gradient: bool
- :ivar augmentations: Settings for using Augmentations.
- :vartype augmentations: str
- :ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
- [0, 1].
- :vartype beta1: float
- :ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
- [0, 1].
- :vartype beta2: float
- :ivar checkpoint_frequency: Frequency to store model checkpoints. Must be a positive integer.
- :vartype checkpoint_frequency: int
- :ivar checkpoint_model: The pretrained checkpoint model for incremental training.
- :vartype checkpoint_model: ~azure.mgmt.machinelearningservices.models.MLFlowModelJobInput
- :ivar checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
- incremental training.
- :vartype checkpoint_run_id: str
- :ivar distributed: Whether to use distributed training.
- :vartype distributed: bool
- :ivar early_stopping: Enable early stopping logic during training.
- :vartype early_stopping: bool
- :ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
- primary metric improvement
- is tracked for early stopping. Must be a positive integer.
- :vartype early_stopping_delay: int
- :ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
- primary metric improvement before
- the run is stopped. Must be a positive integer.
- :vartype early_stopping_patience: int
- :ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
- :vartype enable_onnx_normalization: bool
- :ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
- be a positive integer.
- :vartype evaluation_frequency: int
- :ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
- "GradAccumulationStep" steps without
- updating the model weights while accumulating the gradients of those steps, and then using
- the accumulated gradients to compute the weight updates. Must be a positive integer.
- :vartype gradient_accumulation_step: int
- :ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
- For instance, passing 2 as value for 'seresnext' means
- freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
- please
- see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :vartype layers_to_freeze: int
- :ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
- :vartype learning_rate: float
- :ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
- 'step'. Known values are: "None", "WarmupCosine", and "Step".
- :vartype learning_rate_scheduler: str or
- ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
- :ivar model_name: Name of the model to use for training.
- For more information on the available models please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :vartype model_name: str
- :ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
- :vartype momentum: float
- :ivar nesterov: Enable nesterov when optimizer is 'sgd'.
- :vartype nesterov: bool
- :ivar number_of_epochs: Number of training epochs. Must be a positive integer.
- :vartype number_of_epochs: int
- :ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
- :vartype number_of_workers: int
- :ivar optimizer: Type of optimizer. Known values are: "None", "Sgd", "Adam", and "Adamw".
- :vartype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
- :ivar random_seed: Random seed to be used when using deterministic training.
- :vartype random_seed: int
- :ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
- the range [0, 1].
- :vartype step_lr_gamma: float
- :ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
- positive integer.
- :vartype step_lr_step_size: int
- :ivar training_batch_size: Training batch size. Must be a positive integer.
- :vartype training_batch_size: int
- :ivar validation_batch_size: Validation batch size. Must be a positive integer.
- :vartype validation_batch_size: int
- :ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
- 'warmup_cosine'. Must be a float in the range [0, 1].
- :vartype warmup_cosine_lr_cycles: float
- :ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
- 'warmup_cosine'. Must be a positive integer.
- :vartype warmup_cosine_lr_warmup_epochs: int
- :ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
- a float in the range[0, 1].
- :vartype weight_decay: float
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ MLAssistConfigurationDisabled, MLAssistConfigurationEnabled
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar ml_assist: [Required] Indicates whether MLAssist feature is enabled. Required. Known
+ values are: "Enabled" and "Disabled".
+ :vartype ml_assist: str or ~azure.mgmt.machinelearningservices.models.MLAssistConfigurationType
+ """
+
+ _validation = {
+ "ml_assist": {"required": True},
+ }
+
+ _attribute_map = {
+ "ml_assist": {"key": "mlAssist", "type": "str"},
+ }
+
+ _subtype_map = {
+ "ml_assist": {"Disabled": "MLAssistConfigurationDisabled", "Enabled": "MLAssistConfigurationEnabled"}
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.ml_assist: Optional[str] = None
+
+
+class MLAssistConfigurationDisabled(MLAssistConfiguration):
+ """Labeling MLAssist configuration definition when MLAssist is disabled.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar ml_assist: [Required] Indicates whether MLAssist feature is enabled. Required. Known
+ values are: "Enabled" and "Disabled".
+ :vartype ml_assist: str or ~azure.mgmt.machinelearningservices.models.MLAssistConfigurationType
+ """
+
+ _validation = {
+ "ml_assist": {"required": True},
+ }
+
+ _attribute_map = {
+ "ml_assist": {"key": "mlAssist", "type": "str"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.ml_assist: str = "Disabled"
+
+
+class MLAssistConfigurationEnabled(MLAssistConfiguration):
+ """Labeling MLAssist configuration definition when MLAssist is enabled.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar ml_assist: [Required] Indicates whether MLAssist feature is enabled. Required. Known
+ values are: "Enabled" and "Disabled".
+ :vartype ml_assist: str or ~azure.mgmt.machinelearningservices.models.MLAssistConfigurationType
+ :ivar inferencing_compute_binding: [Required] AML compute binding used in inferencing.
+ Required.
+ :vartype inferencing_compute_binding: str
+ :ivar training_compute_binding: [Required] AML compute binding used in training. Required.
+ :vartype training_compute_binding: str
+ """
+
+ _validation = {
+ "ml_assist": {"required": True},
+ "inferencing_compute_binding": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "training_compute_binding": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
+ _attribute_map = {
+ "ml_assist": {"key": "mlAssist", "type": "str"},
+ "inferencing_compute_binding": {"key": "inferencingComputeBinding", "type": "str"},
+ "training_compute_binding": {"key": "trainingComputeBinding", "type": "str"},
+ }
+
+ def __init__(self, *, inferencing_compute_binding: str, training_compute_binding: str, **kwargs: Any) -> None:
+ """
+ :keyword inferencing_compute_binding: [Required] AML compute binding used in inferencing.
+ Required.
+ :paramtype inferencing_compute_binding: str
+ :keyword training_compute_binding: [Required] AML compute binding used in training. Required.
+ :paramtype training_compute_binding: str
+ """
+ super().__init__(**kwargs)
+ self.ml_assist: str = "Enabled"
+ self.inferencing_compute_binding = inferencing_compute_binding
+ self.training_compute_binding = training_compute_binding
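+
+# --- Editor's note: illustrative usage sketch, not part of the generated client. ---
+# Both compute bindings are required; the values below are hypothetical AML
+# compute names used purely for illustration.
+#
+#   from azure.mgmt.machinelearningservices import models
+#
+#   ml_assist = models.MLAssistConfigurationEnabled(
+#       inferencing_compute_binding="cpu-cluster",
+#       training_compute_binding="gpu-cluster",
+#   )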
+
+
+class MLFlowModelJobInput(AssetJobInput, JobInput):
+ """MLFlowModelJobInput.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar description: Description for the input.
+ :vartype description: str
+ :ivar job_input_type: [Required] Specifies the type of job. Required. Known values are:
+ "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
+ "triton_model".
+ :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
+ :ivar mode: Input Asset Delivery Mode. Known values are: "ReadOnlyMount", "ReadWriteMount",
+ "Download", "Direct", "EvalMount", and "EvalDownload".
+ :vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
+ :ivar uri: [Required] Input Asset URI. Required.
+ :vartype uri: str
+ """
+
+ _validation = {
+ "job_input_type": {"required": True},
+ "uri": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
+ _attribute_map = {
+ "description": {"key": "description", "type": "str"},
+ "job_input_type": {"key": "jobInputType", "type": "str"},
+ "mode": {"key": "mode", "type": "str"},
+ "uri": {"key": "uri", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ uri: str,
+ description: Optional[str] = None,
+ mode: Optional[Union[str, "_models.InputDeliveryMode"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword description: Description for the input.
+ :paramtype description: str
+ :keyword mode: Input Asset Delivery Mode. Known values are: "ReadOnlyMount", "ReadWriteMount",
+ "Download", "Direct", "EvalMount", and "EvalDownload".
+ :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
+ :keyword uri: [Required] Input Asset URI. Required.
+ :paramtype uri: str
+ """
+ super().__init__(mode=mode, uri=uri, description=description, **kwargs)
+ self.description = description
+ self.job_input_type: str = "mlflow_model"
+ self.mode = mode
+ self.uri = uri
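+
+# --- Editor's note: illustrative usage sketch, not part of the generated client. ---
+# Only `uri` is required; `job_input_type` is set to "mlflow_model" by the
+# constructor. The URI below is a placeholder.
+#
+#   from azure.mgmt.machinelearningservices import models
+#
+#   model_input = models.MLFlowModelJobInput(
+#       uri="azureml://registries/<registry>/models/<model>/versions/1",
+#       mode="ReadOnlyMount",
+#   )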
+
+
+class MLFlowModelJobOutput(AssetJobOutput, JobOutput):
+ """MLFlowModelJobOutput.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar description: Description for the output.
+ :vartype description: str
+ :ivar job_output_type: [Required] Specifies the type of job. Required. Known values are:
+ "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and "triton_model".
+ :vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
+ :ivar asset_name: Output Asset Name.
+ :vartype asset_name: str
+ :ivar asset_version: Output Asset Version.
+ :vartype asset_version: str
+ :ivar auto_delete_setting: Auto delete setting of output data asset.
+ :vartype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
+ :vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
+ :ivar uri: Output Asset URI.
+ :vartype uri: str
"""
+ _validation = {
+ "job_output_type": {"required": True},
+ }
+
_attribute_map = {
- "advanced_settings": {"key": "advancedSettings", "type": "str"},
- "ams_gradient": {"key": "amsGradient", "type": "bool"},
- "augmentations": {"key": "augmentations", "type": "str"},
- "beta1": {"key": "beta1", "type": "float"},
- "beta2": {"key": "beta2", "type": "float"},
- "checkpoint_frequency": {"key": "checkpointFrequency", "type": "int"},
- "checkpoint_model": {"key": "checkpointModel", "type": "MLFlowModelJobInput"},
- "checkpoint_run_id": {"key": "checkpointRunId", "type": "str"},
- "distributed": {"key": "distributed", "type": "bool"},
- "early_stopping": {"key": "earlyStopping", "type": "bool"},
- "early_stopping_delay": {"key": "earlyStoppingDelay", "type": "int"},
- "early_stopping_patience": {"key": "earlyStoppingPatience", "type": "int"},
- "enable_onnx_normalization": {"key": "enableOnnxNormalization", "type": "bool"},
- "evaluation_frequency": {"key": "evaluationFrequency", "type": "int"},
- "gradient_accumulation_step": {"key": "gradientAccumulationStep", "type": "int"},
- "layers_to_freeze": {"key": "layersToFreeze", "type": "int"},
- "learning_rate": {"key": "learningRate", "type": "float"},
- "learning_rate_scheduler": {"key": "learningRateScheduler", "type": "str"},
- "model_name": {"key": "modelName", "type": "str"},
- "momentum": {"key": "momentum", "type": "float"},
- "nesterov": {"key": "nesterov", "type": "bool"},
- "number_of_epochs": {"key": "numberOfEpochs", "type": "int"},
- "number_of_workers": {"key": "numberOfWorkers", "type": "int"},
- "optimizer": {"key": "optimizer", "type": "str"},
- "random_seed": {"key": "randomSeed", "type": "int"},
- "step_lr_gamma": {"key": "stepLRGamma", "type": "float"},
- "step_lr_step_size": {"key": "stepLRStepSize", "type": "int"},
- "training_batch_size": {"key": "trainingBatchSize", "type": "int"},
- "validation_batch_size": {"key": "validationBatchSize", "type": "int"},
- "warmup_cosine_lr_cycles": {"key": "warmupCosineLRCycles", "type": "float"},
- "warmup_cosine_lr_warmup_epochs": {"key": "warmupCosineLRWarmupEpochs", "type": "int"},
- "weight_decay": {"key": "weightDecay", "type": "float"},
+ "description": {"key": "description", "type": "str"},
+ "job_output_type": {"key": "jobOutputType", "type": "str"},
+ "asset_name": {"key": "assetName", "type": "str"},
+ "asset_version": {"key": "assetVersion", "type": "str"},
+ "auto_delete_setting": {"key": "autoDeleteSetting", "type": "AutoDeleteSetting"},
+ "mode": {"key": "mode", "type": "str"},
+ "uri": {"key": "uri", "type": "str"},
}
- def __init__( # pylint: disable=too-many-locals
+ def __init__(
self,
*,
- advanced_settings: Optional[str] = None,
- ams_gradient: Optional[bool] = None,
- augmentations: Optional[str] = None,
- beta1: Optional[float] = None,
- beta2: Optional[float] = None,
- checkpoint_frequency: Optional[int] = None,
- checkpoint_model: Optional["_models.MLFlowModelJobInput"] = None,
- checkpoint_run_id: Optional[str] = None,
- distributed: Optional[bool] = None,
- early_stopping: Optional[bool] = None,
- early_stopping_delay: Optional[int] = None,
- early_stopping_patience: Optional[int] = None,
- enable_onnx_normalization: Optional[bool] = None,
- evaluation_frequency: Optional[int] = None,
- gradient_accumulation_step: Optional[int] = None,
- layers_to_freeze: Optional[int] = None,
- learning_rate: Optional[float] = None,
- learning_rate_scheduler: Optional[Union[str, "_models.LearningRateScheduler"]] = None,
- model_name: Optional[str] = None,
- momentum: Optional[float] = None,
- nesterov: Optional[bool] = None,
- number_of_epochs: Optional[int] = None,
- number_of_workers: Optional[int] = None,
- optimizer: Optional[Union[str, "_models.StochasticOptimizer"]] = None,
- random_seed: Optional[int] = None,
- step_lr_gamma: Optional[float] = None,
- step_lr_step_size: Optional[int] = None,
- training_batch_size: Optional[int] = None,
- validation_batch_size: Optional[int] = None,
- warmup_cosine_lr_cycles: Optional[float] = None,
- warmup_cosine_lr_warmup_epochs: Optional[int] = None,
- weight_decay: Optional[float] = None,
+ description: Optional[str] = None,
+ asset_name: Optional[str] = None,
+ asset_version: Optional[str] = None,
+ auto_delete_setting: Optional["_models.AutoDeleteSetting"] = None,
+ mode: Optional[Union[str, "_models.OutputDeliveryMode"]] = None,
+ uri: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword advanced_settings: Settings for advanced scenarios.
- :paramtype advanced_settings: str
- :keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
- :paramtype ams_gradient: bool
- :keyword augmentations: Settings for using Augmentations.
- :paramtype augmentations: str
- :keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
- range [0, 1].
- :paramtype beta1: float
- :keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
- range [0, 1].
- :paramtype beta2: float
- :keyword checkpoint_frequency: Frequency to store model checkpoints. Must be a positive
- integer.
- :paramtype checkpoint_frequency: int
- :keyword checkpoint_model: The pretrained checkpoint model for incremental training.
- :paramtype checkpoint_model: ~azure.mgmt.machinelearningservices.models.MLFlowModelJobInput
- :keyword checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
- incremental training.
- :paramtype checkpoint_run_id: str
- :keyword distributed: Whether to use distributed training.
- :paramtype distributed: bool
- :keyword early_stopping: Enable early stopping logic during training.
- :paramtype early_stopping: bool
- :keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
- before primary metric improvement
- is tracked for early stopping. Must be a positive integer.
- :paramtype early_stopping_delay: int
- :keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
- primary metric improvement before
- the run is stopped. Must be a positive integer.
- :paramtype early_stopping_patience: int
- :keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
- :paramtype enable_onnx_normalization: bool
- :keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
- Must be a positive integer.
- :paramtype evaluation_frequency: int
- :keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
- "GradAccumulationStep" steps without
- updating the model weights while accumulating the gradients of those steps, and then using
- the accumulated gradients to compute the weight updates. Must be a positive integer.
- :paramtype gradient_accumulation_step: int
- :keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
- integer.
- For instance, passing 2 as value for 'seresnext' means
- freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
- please
- see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :paramtype layers_to_freeze: int
- :keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
- :paramtype learning_rate: float
- :keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
- 'step'. Known values are: "None", "WarmupCosine", and "Step".
- :paramtype learning_rate_scheduler: str or
- ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
- :keyword model_name: Name of the model to use for training.
- For more information on the available models please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :paramtype model_name: str
- :keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
- 1].
- :paramtype momentum: float
- :keyword nesterov: Enable nesterov when optimizer is 'sgd'.
- :paramtype nesterov: bool
- :keyword number_of_epochs: Number of training epochs. Must be a positive integer.
- :paramtype number_of_epochs: int
- :keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
- :paramtype number_of_workers: int
- :keyword optimizer: Type of optimizer. Known values are: "None", "Sgd", "Adam", and "Adamw".
- :paramtype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
- :keyword random_seed: Random seed to be used when using deterministic training.
- :paramtype random_seed: int
- :keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
- in the range [0, 1].
- :paramtype step_lr_gamma: float
- :keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
- a positive integer.
- :paramtype step_lr_step_size: int
- :keyword training_batch_size: Training batch size. Must be a positive integer.
- :paramtype training_batch_size: int
- :keyword validation_batch_size: Validation batch size. Must be a positive integer.
- :paramtype validation_batch_size: int
- :keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
- 'warmup_cosine'. Must be a float in the range [0, 1].
- :paramtype warmup_cosine_lr_cycles: float
- :keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
- 'warmup_cosine'. Must be a positive integer.
- :paramtype warmup_cosine_lr_warmup_epochs: int
- :keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
- be a float in the range[0, 1].
- :paramtype weight_decay: float
+ :keyword description: Description for the output.
+ :paramtype description: str
+ :keyword asset_name: Output Asset Name.
+ :paramtype asset_name: str
+ :keyword asset_version: Output Asset Version.
+ :paramtype asset_version: str
+ :keyword auto_delete_setting: Auto delete setting of output data asset.
+ :paramtype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
+ :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
+ :keyword uri: Output Asset URI.
+ :paramtype uri: str
+ """
+ super().__init__(
+ asset_name=asset_name,
+ asset_version=asset_version,
+ auto_delete_setting=auto_delete_setting,
+ mode=mode,
+ uri=uri,
+ description=description,
+ **kwargs
+ )
+ self.description = description
+ self.job_output_type: str = "mlflow_model"
+ self.asset_name = asset_name
+ self.asset_version = asset_version
+ self.auto_delete_setting = auto_delete_setting
+ self.mode = mode
+ self.uri = uri
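+
+# --- Editor's note: illustrative usage sketch, not part of the generated client. ---
+# All fields are optional; `job_output_type` is fixed to "mlflow_model" by the
+# constructor. The asset name and version below are placeholders.
+#
+#   from azure.mgmt.machinelearningservices import models
+#
+#   model_output = models.MLFlowModelJobOutput(
+#       asset_name="churn-model",
+#       asset_version="1",
+#       mode="ReadWriteMount",
+#   )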
+
+
+class MLTableData(DataVersionBaseProperties): # pylint: disable=too-many-instance-attributes
+ """MLTable data definition.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar auto_delete_setting: Specifies the lifecycle setting of managed data asset.
+ :vartype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :ivar is_anonymous: If the name and version are system generated (anonymous registration). For
+ types where Stage is defined, when Stage is provided it will be used to populate IsAnonymous.
+ :vartype is_anonymous: bool
+ :ivar is_archived: Is the asset archived? For types where Stage is defined, when Stage is
+ provided it will be used to populate IsArchived.
+ :vartype is_archived: bool
+ :ivar data_type: [Required] Specifies the type of data. Required. Known values are: "uri_file",
+ "uri_folder", and "mltable".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.DataType
+ :ivar data_uri: [Required] Uri of the data. Example:
+ https://go.microsoft.com/fwlink/?linkid=2202330. Required.
+ :vartype data_uri: str
+ :ivar intellectual_property: Intellectual Property details. Used if data is an Intellectual
+ Property.
+ :vartype intellectual_property: ~azure.mgmt.machinelearningservices.models.IntellectualProperty
+ :ivar stage: Stage in the data lifecycle assigned to this data asset.
+ :vartype stage: str
+ :ivar referenced_uris: Uris referenced in the MLTable definition (required for lineage).
+ :vartype referenced_uris: list[str]
+ """
+
+ _validation = {
+ "data_type": {"required": True},
+ "data_uri": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
+ _attribute_map = {
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "auto_delete_setting": {"key": "autoDeleteSetting", "type": "AutoDeleteSetting"},
+ "is_anonymous": {"key": "isAnonymous", "type": "bool"},
+ "is_archived": {"key": "isArchived", "type": "bool"},
+ "data_type": {"key": "dataType", "type": "str"},
+ "data_uri": {"key": "dataUri", "type": "str"},
+ "intellectual_property": {"key": "intellectualProperty", "type": "IntellectualProperty"},
+ "stage": {"key": "stage", "type": "str"},
+ "referenced_uris": {"key": "referencedUris", "type": "[str]"},
+ }
+
+ def __init__(
+ self,
+ *,
+ data_uri: str,
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ auto_delete_setting: Optional["_models.AutoDeleteSetting"] = None,
+ is_anonymous: bool = False,
+ is_archived: bool = False,
+ intellectual_property: Optional["_models.IntellectualProperty"] = None,
+ stage: Optional[str] = None,
+ referenced_uris: Optional[List[str]] = None,
+ **kwargs: Any
+ ) -> None:
"""
- super().__init__(**kwargs)
- self.advanced_settings = advanced_settings
- self.ams_gradient = ams_gradient
- self.augmentations = augmentations
- self.beta1 = beta1
- self.beta2 = beta2
- self.checkpoint_frequency = checkpoint_frequency
- self.checkpoint_model = checkpoint_model
- self.checkpoint_run_id = checkpoint_run_id
- self.distributed = distributed
- self.early_stopping = early_stopping
- self.early_stopping_delay = early_stopping_delay
- self.early_stopping_patience = early_stopping_patience
- self.enable_onnx_normalization = enable_onnx_normalization
- self.evaluation_frequency = evaluation_frequency
- self.gradient_accumulation_step = gradient_accumulation_step
- self.layers_to_freeze = layers_to_freeze
- self.learning_rate = learning_rate
- self.learning_rate_scheduler = learning_rate_scheduler
- self.model_name = model_name
- self.momentum = momentum
- self.nesterov = nesterov
- self.number_of_epochs = number_of_epochs
- self.number_of_workers = number_of_workers
- self.optimizer = optimizer
- self.random_seed = random_seed
- self.step_lr_gamma = step_lr_gamma
- self.step_lr_step_size = step_lr_step_size
- self.training_batch_size = training_batch_size
- self.validation_batch_size = validation_batch_size
- self.warmup_cosine_lr_cycles = warmup_cosine_lr_cycles
- self.warmup_cosine_lr_warmup_epochs = warmup_cosine_lr_warmup_epochs
- self.weight_decay = weight_decay
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword auto_delete_setting: Specifies the lifecycle setting of managed data asset.
+ :paramtype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :keyword is_anonymous: If the name and version are system generated (anonymous registration). For
+ types where Stage is defined, when Stage is provided it will be used to populate IsAnonymous.
+ :paramtype is_anonymous: bool
+ :keyword is_archived: Is the asset archived? For types where Stage is defined, when Stage is
+ provided it will be used to populate IsArchived.
+ :paramtype is_archived: bool
+ :keyword data_uri: [Required] Uri of the data. Example:
+ https://go.microsoft.com/fwlink/?linkid=2202330. Required.
+ :paramtype data_uri: str
+ :keyword intellectual_property: Intellectual Property details. Used if data is an Intellectual
+ Property.
+ :paramtype intellectual_property:
+ ~azure.mgmt.machinelearningservices.models.IntellectualProperty
+ :keyword stage: Stage in the data lifecycle assigned to this data asset.
+ :paramtype stage: str
+ :keyword referenced_uris: Uris referenced in the MLTable definition (required for lineage).
+ :paramtype referenced_uris: list[str]
+ """
+ super().__init__(
+ description=description,
+ properties=properties,
+ tags=tags,
+ auto_delete_setting=auto_delete_setting,
+ is_anonymous=is_anonymous,
+ is_archived=is_archived,
+ data_uri=data_uri,
+ intellectual_property=intellectual_property,
+ stage=stage,
+ **kwargs
+ )
+ self.data_type: str = "mltable"
+ self.referenced_uris = referenced_uris
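+
+# --- Editor's note: illustrative usage sketch, not part of the generated client. ---
+# `data_uri` is the only required argument; `data_type` is fixed to "mltable"
+# by the constructor. The datastore path and referenced file are placeholders.
+#
+#   from azure.mgmt.machinelearningservices import models
+#
+#   mltable = models.MLTableData(
+#       data_uri="azureml://datastores/workspaceblobstore/paths/<folder>/",
+#       referenced_uris=["./data.csv"],
+#       description="Sample MLTable asset",
+#   )
+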
-class ImageModelSettingsClassification(ImageModelSettings): # pylint: disable=too-many-instance-attributes
- """Settings used for training the model.
- For more information on the available settings please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+class MLTableJobInput(AssetJobInput, JobInput):
+ """MLTableJobInput.
- :ivar advanced_settings: Settings for advanced scenarios.
- :vartype advanced_settings: str
- :ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
- :vartype ams_gradient: bool
- :ivar augmentations: Settings for using Augmentations.
- :vartype augmentations: str
- :ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
- [0, 1].
- :vartype beta1: float
- :ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
- [0, 1].
- :vartype beta2: float
- :ivar checkpoint_frequency: Frequency to store model checkpoints. Must be a positive integer.
- :vartype checkpoint_frequency: int
- :ivar checkpoint_model: The pretrained checkpoint model for incremental training.
- :vartype checkpoint_model: ~azure.mgmt.machinelearningservices.models.MLFlowModelJobInput
- :ivar checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
- incremental training.
- :vartype checkpoint_run_id: str
- :ivar distributed: Whether to use distributed training.
- :vartype distributed: bool
- :ivar early_stopping: Enable early stopping logic during training.
- :vartype early_stopping: bool
- :ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
- primary metric improvement
- is tracked for early stopping. Must be a positive integer.
- :vartype early_stopping_delay: int
- :ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
- primary metric improvement before
- the run is stopped. Must be a positive integer.
- :vartype early_stopping_patience: int
- :ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
- :vartype enable_onnx_normalization: bool
- :ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
- be a positive integer.
- :vartype evaluation_frequency: int
- :ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
- "GradAccumulationStep" steps without
- updating the model weights while accumulating the gradients of those steps, and then using
- the accumulated gradients to compute the weight updates. Must be a positive integer.
- :vartype gradient_accumulation_step: int
- :ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
- For instance, passing 2 as value for 'seresnext' means
- freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
- please
- see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :vartype layers_to_freeze: int
- :ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
- :vartype learning_rate: float
- :ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
- 'step'. Known values are: "None", "WarmupCosine", and "Step".
- :vartype learning_rate_scheduler: str or
- ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
- :ivar model_name: Name of the model to use for training.
- For more information on the available models please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :vartype model_name: str
- :ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
- :vartype momentum: float
- :ivar nesterov: Enable nesterov when optimizer is 'sgd'.
- :vartype nesterov: bool
- :ivar number_of_epochs: Number of training epochs. Must be a positive integer.
- :vartype number_of_epochs: int
- :ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
- :vartype number_of_workers: int
- :ivar optimizer: Type of optimizer. Known values are: "None", "Sgd", "Adam", and "Adamw".
- :vartype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
- :ivar random_seed: Random seed to be used when using deterministic training.
- :vartype random_seed: int
- :ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
- the range [0, 1].
- :vartype step_lr_gamma: float
- :ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
- positive integer.
- :vartype step_lr_step_size: int
- :ivar training_batch_size: Training batch size. Must be a positive integer.
- :vartype training_batch_size: int
- :ivar validation_batch_size: Validation batch size. Must be a positive integer.
- :vartype validation_batch_size: int
- :ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
- 'warmup_cosine'. Must be a float in the range [0, 1].
- :vartype warmup_cosine_lr_cycles: float
- :ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
- 'warmup_cosine'. Must be a positive integer.
- :vartype warmup_cosine_lr_warmup_epochs: int
- :ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
- a float in the range[0, 1].
- :vartype weight_decay: float
- :ivar training_crop_size: Image crop size that is input to the neural network for the training
- dataset. Must be a positive integer.
- :vartype training_crop_size: int
- :ivar validation_crop_size: Image crop size that is input to the neural network for the
- validation dataset. Must be a positive integer.
- :vartype validation_crop_size: int
- :ivar validation_resize_size: Image size to which to resize before cropping for validation
- dataset. Must be a positive integer.
- :vartype validation_resize_size: int
- :ivar weighted_loss: Weighted loss. The accepted values are 0 for no weighted loss.
- 1 for weighted loss with sqrt.(class_weights). 2 for weighted loss with class_weights. Must be
- 0 or 1 or 2.
- :vartype weighted_loss: int
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar description: Description for the input.
+ :vartype description: str
+ :ivar job_input_type: [Required] Specifies the type of job. Required. Known values are:
+ "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
+ "triton_model".
+ :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
+ :ivar mode: Input Asset Delivery Mode. Known values are: "ReadOnlyMount", "ReadWriteMount",
+ "Download", "Direct", "EvalMount", and "EvalDownload".
+ :vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
+ :ivar uri: [Required] Input Asset URI. Required.
+ :vartype uri: str
+ """
+
+ _validation = {
+ "job_input_type": {"required": True},
+ "uri": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
+ _attribute_map = {
+ "description": {"key": "description", "type": "str"},
+ "job_input_type": {"key": "jobInputType", "type": "str"},
+ "mode": {"key": "mode", "type": "str"},
+ "uri": {"key": "uri", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ uri: str,
+ description: Optional[str] = None,
+ mode: Optional[Union[str, "_models.InputDeliveryMode"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword description: Description for the input.
+ :paramtype description: str
+ :keyword mode: Input Asset Delivery Mode. Known values are: "ReadOnlyMount", "ReadWriteMount",
+ "Download", "Direct", "EvalMount", and "EvalDownload".
+ :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
+ :keyword uri: [Required] Input Asset URI. Required.
+ :paramtype uri: str
+ """
+ super().__init__(mode=mode, uri=uri, description=description, **kwargs)
+ self.description = description
+ self.job_input_type: str = "mltable"
+ self.mode = mode
+ self.uri = uri
+
+
+class MLTableJobOutput(AssetJobOutput, JobOutput):
+ """MLTableJobOutput.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar description: Description for the output.
+ :vartype description: str
+ :ivar job_output_type: [Required] Specifies the type of job. Required. Known values are:
+ "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and "triton_model".
+ :vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
+ :ivar asset_name: Output Asset Name.
+ :vartype asset_name: str
+ :ivar asset_version: Output Asset Version.
+ :vartype asset_version: str
+ :ivar auto_delete_setting: Auto delete setting of output data asset.
+ :vartype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
+ :vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
+ :ivar uri: Output Asset URI.
+ :vartype uri: str
+ """
+
+ _validation = {
+ "job_output_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "description": {"key": "description", "type": "str"},
+ "job_output_type": {"key": "jobOutputType", "type": "str"},
+ "asset_name": {"key": "assetName", "type": "str"},
+ "asset_version": {"key": "assetVersion", "type": "str"},
+ "auto_delete_setting": {"key": "autoDeleteSetting", "type": "AutoDeleteSetting"},
+ "mode": {"key": "mode", "type": "str"},
+ "uri": {"key": "uri", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ description: Optional[str] = None,
+ asset_name: Optional[str] = None,
+ asset_version: Optional[str] = None,
+ auto_delete_setting: Optional["_models.AutoDeleteSetting"] = None,
+ mode: Optional[Union[str, "_models.OutputDeliveryMode"]] = None,
+ uri: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword description: Description for the output.
+ :paramtype description: str
+ :keyword asset_name: Output Asset Name.
+ :paramtype asset_name: str
+ :keyword asset_version: Output Asset Version.
+ :paramtype asset_version: str
+ :keyword auto_delete_setting: Auto delete setting of output data asset.
+ :paramtype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
+ :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
+ :keyword uri: Output Asset URI.
+ :paramtype uri: str
+ """
+ super().__init__(
+ asset_name=asset_name,
+ asset_version=asset_version,
+ auto_delete_setting=auto_delete_setting,
+ mode=mode,
+ uri=uri,
+ description=description,
+ **kwargs
+ )
+ self.description = description
+ self.job_output_type: str = "mltable"
+ self.asset_name = asset_name
+ self.asset_version = asset_version
+ self.auto_delete_setting = auto_delete_setting
+ self.mode = mode
+ self.uri = uri
+
+
+class ModelConfiguration(_serialization.Model):
+ """Model configuration options.
+
+ :ivar mode: Input delivery mode for the model. Known values are: "Copy" and "Download".
+ :vartype mode: str or ~azure.mgmt.machinelearningservices.models.PackageInputDeliveryMode
+ :ivar mount_path: Relative mounting path of the model in the target image.
+ :vartype mount_path: str
"""
_attribute_map = {
- "advanced_settings": {"key": "advancedSettings", "type": "str"},
- "ams_gradient": {"key": "amsGradient", "type": "bool"},
- "augmentations": {"key": "augmentations", "type": "str"},
- "beta1": {"key": "beta1", "type": "float"},
- "beta2": {"key": "beta2", "type": "float"},
- "checkpoint_frequency": {"key": "checkpointFrequency", "type": "int"},
- "checkpoint_model": {"key": "checkpointModel", "type": "MLFlowModelJobInput"},
- "checkpoint_run_id": {"key": "checkpointRunId", "type": "str"},
- "distributed": {"key": "distributed", "type": "bool"},
- "early_stopping": {"key": "earlyStopping", "type": "bool"},
- "early_stopping_delay": {"key": "earlyStoppingDelay", "type": "int"},
- "early_stopping_patience": {"key": "earlyStoppingPatience", "type": "int"},
- "enable_onnx_normalization": {"key": "enableOnnxNormalization", "type": "bool"},
- "evaluation_frequency": {"key": "evaluationFrequency", "type": "int"},
- "gradient_accumulation_step": {"key": "gradientAccumulationStep", "type": "int"},
- "layers_to_freeze": {"key": "layersToFreeze", "type": "int"},
- "learning_rate": {"key": "learningRate", "type": "float"},
- "learning_rate_scheduler": {"key": "learningRateScheduler", "type": "str"},
- "model_name": {"key": "modelName", "type": "str"},
- "momentum": {"key": "momentum", "type": "float"},
- "nesterov": {"key": "nesterov", "type": "bool"},
- "number_of_epochs": {"key": "numberOfEpochs", "type": "int"},
- "number_of_workers": {"key": "numberOfWorkers", "type": "int"},
- "optimizer": {"key": "optimizer", "type": "str"},
- "random_seed": {"key": "randomSeed", "type": "int"},
- "step_lr_gamma": {"key": "stepLRGamma", "type": "float"},
- "step_lr_step_size": {"key": "stepLRStepSize", "type": "int"},
- "training_batch_size": {"key": "trainingBatchSize", "type": "int"},
- "validation_batch_size": {"key": "validationBatchSize", "type": "int"},
- "warmup_cosine_lr_cycles": {"key": "warmupCosineLRCycles", "type": "float"},
- "warmup_cosine_lr_warmup_epochs": {"key": "warmupCosineLRWarmupEpochs", "type": "int"},
- "weight_decay": {"key": "weightDecay", "type": "float"},
- "training_crop_size": {"key": "trainingCropSize", "type": "int"},
- "validation_crop_size": {"key": "validationCropSize", "type": "int"},
- "validation_resize_size": {"key": "validationResizeSize", "type": "int"},
- "weighted_loss": {"key": "weightedLoss", "type": "int"},
+ "mode": {"key": "mode", "type": "str"},
+ "mount_path": {"key": "mountPath", "type": "str"},
}
- def __init__( # pylint: disable=too-many-locals
+ def __init__(
self,
*,
- advanced_settings: Optional[str] = None,
- ams_gradient: Optional[bool] = None,
- augmentations: Optional[str] = None,
- beta1: Optional[float] = None,
- beta2: Optional[float] = None,
- checkpoint_frequency: Optional[int] = None,
- checkpoint_model: Optional["_models.MLFlowModelJobInput"] = None,
- checkpoint_run_id: Optional[str] = None,
- distributed: Optional[bool] = None,
- early_stopping: Optional[bool] = None,
- early_stopping_delay: Optional[int] = None,
- early_stopping_patience: Optional[int] = None,
- enable_onnx_normalization: Optional[bool] = None,
- evaluation_frequency: Optional[int] = None,
- gradient_accumulation_step: Optional[int] = None,
- layers_to_freeze: Optional[int] = None,
- learning_rate: Optional[float] = None,
- learning_rate_scheduler: Optional[Union[str, "_models.LearningRateScheduler"]] = None,
- model_name: Optional[str] = None,
- momentum: Optional[float] = None,
- nesterov: Optional[bool] = None,
- number_of_epochs: Optional[int] = None,
- number_of_workers: Optional[int] = None,
- optimizer: Optional[Union[str, "_models.StochasticOptimizer"]] = None,
- random_seed: Optional[int] = None,
- step_lr_gamma: Optional[float] = None,
- step_lr_step_size: Optional[int] = None,
- training_batch_size: Optional[int] = None,
- validation_batch_size: Optional[int] = None,
- warmup_cosine_lr_cycles: Optional[float] = None,
- warmup_cosine_lr_warmup_epochs: Optional[int] = None,
- weight_decay: Optional[float] = None,
- training_crop_size: Optional[int] = None,
- validation_crop_size: Optional[int] = None,
- validation_resize_size: Optional[int] = None,
- weighted_loss: Optional[int] = None,
+ mode: Optional[Union[str, "_models.PackageInputDeliveryMode"]] = None,
+ mount_path: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword advanced_settings: Settings for advanced scenarios.
- :paramtype advanced_settings: str
- :keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
- :paramtype ams_gradient: bool
- :keyword augmentations: Settings for using Augmentations.
- :paramtype augmentations: str
- :keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
- range [0, 1].
- :paramtype beta1: float
- :keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
- range [0, 1].
- :paramtype beta2: float
- :keyword checkpoint_frequency: Frequency to store model checkpoints. Must be a positive
- integer.
- :paramtype checkpoint_frequency: int
- :keyword checkpoint_model: The pretrained checkpoint model for incremental training.
- :paramtype checkpoint_model: ~azure.mgmt.machinelearningservices.models.MLFlowModelJobInput
- :keyword checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
- incremental training.
- :paramtype checkpoint_run_id: str
- :keyword distributed: Whether to use distributed training.
- :paramtype distributed: bool
- :keyword early_stopping: Enable early stopping logic during training.
- :paramtype early_stopping: bool
- :keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
- before primary metric improvement
- is tracked for early stopping. Must be a positive integer.
- :paramtype early_stopping_delay: int
- :keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
- primary metric improvement before
- the run is stopped. Must be a positive integer.
- :paramtype early_stopping_patience: int
- :keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
- :paramtype enable_onnx_normalization: bool
- :keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
- Must be a positive integer.
- :paramtype evaluation_frequency: int
- :keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
- "GradAccumulationStep" steps without
- updating the model weights while accumulating the gradients of those steps, and then using
- the accumulated gradients to compute the weight updates. Must be a positive integer.
- :paramtype gradient_accumulation_step: int
- :keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
- integer.
- For instance, passing 2 as value for 'seresnext' means
- freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
- please
- see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :paramtype layers_to_freeze: int
- :keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
- :paramtype learning_rate: float
- :keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
- 'step'. Known values are: "None", "WarmupCosine", and "Step".
- :paramtype learning_rate_scheduler: str or
- ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
- :keyword model_name: Name of the model to use for training.
- For more information on the available models please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :paramtype model_name: str
- :keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
- 1].
- :paramtype momentum: float
- :keyword nesterov: Enable nesterov when optimizer is 'sgd'.
- :paramtype nesterov: bool
- :keyword number_of_epochs: Number of training epochs. Must be a positive integer.
- :paramtype number_of_epochs: int
- :keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
- :paramtype number_of_workers: int
- :keyword optimizer: Type of optimizer. Known values are: "None", "Sgd", "Adam", and "Adamw".
- :paramtype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
- :keyword random_seed: Random seed to be used when using deterministic training.
- :paramtype random_seed: int
- :keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
- in the range [0, 1].
- :paramtype step_lr_gamma: float
- :keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
- a positive integer.
- :paramtype step_lr_step_size: int
- :keyword training_batch_size: Training batch size. Must be a positive integer.
- :paramtype training_batch_size: int
- :keyword validation_batch_size: Validation batch size. Must be a positive integer.
- :paramtype validation_batch_size: int
- :keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
- 'warmup_cosine'. Must be a float in the range [0, 1].
- :paramtype warmup_cosine_lr_cycles: float
- :keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
- 'warmup_cosine'. Must be a positive integer.
- :paramtype warmup_cosine_lr_warmup_epochs: int
- :keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
- be a float in the range[0, 1].
- :paramtype weight_decay: float
- :keyword training_crop_size: Image crop size that is input to the neural network for the
- training dataset. Must be a positive integer.
- :paramtype training_crop_size: int
- :keyword validation_crop_size: Image crop size that is input to the neural network for the
- validation dataset. Must be a positive integer.
- :paramtype validation_crop_size: int
- :keyword validation_resize_size: Image size to which to resize before cropping for validation
- dataset. Must be a positive integer.
- :paramtype validation_resize_size: int
- :keyword weighted_loss: Weighted loss. The accepted values are 0 for no weighted loss.
- 1 for weighted loss with sqrt.(class_weights). 2 for weighted loss with class_weights. Must be
- 0 or 1 or 2.
- :paramtype weighted_loss: int
+ :keyword mode: Input delivery mode for the model. Known values are: "Copy" and "Download".
+ :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.PackageInputDeliveryMode
+ :keyword mount_path: Relative mounting path of the model in the target image.
+ :paramtype mount_path: str
+ """
+ super().__init__(**kwargs)
+ self.mode = mode
+ self.mount_path = mount_path
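+
+# --- Editor's note: illustrative usage sketch, not part of the generated client. ---
+# Both fields are optional; the mount path below is a hypothetical example.
+#
+#   from azure.mgmt.machinelearningservices import models
+#
+#   model_config = models.ModelConfiguration(mode="Download", mount_path="/var/azureml-models")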
+
+
+class ModelContainer(ProxyResource):
+ """Azure Resource Manager resource envelope.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.ModelContainerProperties
+ """
+
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "properties": {"required": True},
+ }
+
+ _attribute_map = {
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "properties": {"key": "properties", "type": "ModelContainerProperties"},
+ }
+
+ def __init__(self, *, properties: "_models.ModelContainerProperties", **kwargs: Any) -> None:
+ """
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.ModelContainerProperties
+ """
+ super().__init__(**kwargs)
+ self.properties = properties
+
+
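The envelope above is what the management client returns for a registered model container. A minimal read sketch, assuming the standard generated `model_containers.get` operation and the `azure-identity` package for credentials (neither is shown in this diff):

    # Hedged sketch: read a registered model container through the management client.
    # The operation group/method names follow the usual generated pattern and are
    # assumed here, not taken from this diff.
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

    client = MachineLearningServicesMgmtClient(
        credential=DefaultAzureCredential(),
        subscription_id="<subscription-id>",
    )

    container = client.model_containers.get(
        resource_group_name="<resource-group>",
        workspace_name="<workspace>",
        name="credit-default-model",
    )
    # id, name, type and system_data are read-only and populated by the service;
    # the writable payload lives under .properties (ModelContainerProperties).
    print(container.name, container.properties.description)
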
+class ModelContainerProperties(AssetContainer):
+ """ModelContainerProperties.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar is_archived: Is the asset archived?
+ :vartype is_archived: bool
+ :ivar latest_version: The latest version inside this container.
+ :vartype latest_version: str
+ :ivar next_version: The next auto incremental version.
+ :vartype next_version: str
+ :ivar provisioning_state: Provisioning state for the model container. Known values are:
+ "Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.AssetProvisioningState
+ """
+
+ _validation = {
+ "latest_version": {"readonly": True},
+ "next_version": {"readonly": True},
+ "provisioning_state": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "is_archived": {"key": "isArchived", "type": "bool"},
+ "latest_version": {"key": "latestVersion", "type": "str"},
+ "next_version": {"key": "nextVersion", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ is_archived: bool = False,
+ **kwargs: Any
+ ) -> None:
"""
- super().__init__(
- advanced_settings=advanced_settings,
- ams_gradient=ams_gradient,
- augmentations=augmentations,
- beta1=beta1,
- beta2=beta2,
- checkpoint_frequency=checkpoint_frequency,
- checkpoint_model=checkpoint_model,
- checkpoint_run_id=checkpoint_run_id,
- distributed=distributed,
- early_stopping=early_stopping,
- early_stopping_delay=early_stopping_delay,
- early_stopping_patience=early_stopping_patience,
- enable_onnx_normalization=enable_onnx_normalization,
- evaluation_frequency=evaluation_frequency,
- gradient_accumulation_step=gradient_accumulation_step,
- layers_to_freeze=layers_to_freeze,
- learning_rate=learning_rate,
- learning_rate_scheduler=learning_rate_scheduler,
- model_name=model_name,
- momentum=momentum,
- nesterov=nesterov,
- number_of_epochs=number_of_epochs,
- number_of_workers=number_of_workers,
- optimizer=optimizer,
- random_seed=random_seed,
- step_lr_gamma=step_lr_gamma,
- step_lr_step_size=step_lr_step_size,
- training_batch_size=training_batch_size,
- validation_batch_size=validation_batch_size,
- warmup_cosine_lr_cycles=warmup_cosine_lr_cycles,
- warmup_cosine_lr_warmup_epochs=warmup_cosine_lr_warmup_epochs,
- weight_decay=weight_decay,
- **kwargs
- )
- self.training_crop_size = training_crop_size
- self.validation_crop_size = validation_crop_size
- self.validation_resize_size = validation_resize_size
- self.weighted_loss = weighted_loss
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword is_archived: Is the asset archived?
+ :paramtype is_archived: bool
+ """
+ super().__init__(description=description, properties=properties, tags=tags, is_archived=is_archived, **kwargs)
+ self.provisioning_state = None
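
ModelContainerProperties carries the writable fields and ModelContainer wraps them as the ARM body. A hedged sketch of assembling that payload locally; serialize() comes from the shared serialization base and is assumed to behave as in other generated models:

    # Hedged sketch: assemble a ModelContainer body for a create/update call.
    from azure.mgmt.machinelearningservices import models as _models

    props = _models.ModelContainerProperties(
        description="Credit default prediction models",
        tags={"team": "risk"},
        is_archived=False,
    )
    body = _models.ModelContainer(properties=props)

    # serialize() emits the wire format using the camelCase keys declared in
    # _attribute_map; read-only fields such as provisioningState, latestVersion
    # and nextVersion are omitted.
    print(body.serialize())
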
-class ImageModelSettingsObjectDetection(ImageModelSettings): # pylint: disable=too-many-instance-attributes
- """Settings used for training the model.
- For more information on the available settings please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+class ModelContainerResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of ModelContainer entities.
- :ivar advanced_settings: Settings for advanced scenarios.
- :vartype advanced_settings: str
- :ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
- :vartype ams_gradient: bool
- :ivar augmentations: Settings for using Augmentations.
- :vartype augmentations: str
- :ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
- [0, 1].
- :vartype beta1: float
- :ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
- [0, 1].
- :vartype beta2: float
- :ivar checkpoint_frequency: Frequency to store model checkpoints. Must be a positive integer.
- :vartype checkpoint_frequency: int
- :ivar checkpoint_model: The pretrained checkpoint model for incremental training.
- :vartype checkpoint_model: ~azure.mgmt.machinelearningservices.models.MLFlowModelJobInput
- :ivar checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
- incremental training.
- :vartype checkpoint_run_id: str
- :ivar distributed: Whether to use distributed training.
- :vartype distributed: bool
- :ivar early_stopping: Enable early stopping logic during training.
- :vartype early_stopping: bool
- :ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
- primary metric improvement
- is tracked for early stopping. Must be a positive integer.
- :vartype early_stopping_delay: int
- :ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
- primary metric improvement before
- the run is stopped. Must be a positive integer.
- :vartype early_stopping_patience: int
- :ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
- :vartype enable_onnx_normalization: bool
- :ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
- be a positive integer.
- :vartype evaluation_frequency: int
- :ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
- "GradAccumulationStep" steps without
- updating the model weights while accumulating the gradients of those steps, and then using
- the accumulated gradients to compute the weight updates. Must be a positive integer.
- :vartype gradient_accumulation_step: int
- :ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
- For instance, passing 2 as value for 'seresnext' means
- freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
- please
- see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :vartype layers_to_freeze: int
- :ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
- :vartype learning_rate: float
- :ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
- 'step'. Known values are: "None", "WarmupCosine", and "Step".
- :vartype learning_rate_scheduler: str or
- ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
- :ivar model_name: Name of the model to use for training.
- For more information on the available models please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :vartype model_name: str
- :ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
- :vartype momentum: float
- :ivar nesterov: Enable nesterov when optimizer is 'sgd'.
- :vartype nesterov: bool
- :ivar number_of_epochs: Number of training epochs. Must be a positive integer.
- :vartype number_of_epochs: int
- :ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
- :vartype number_of_workers: int
- :ivar optimizer: Type of optimizer. Known values are: "None", "Sgd", "Adam", and "Adamw".
- :vartype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
- :ivar random_seed: Random seed to be used when using deterministic training.
- :vartype random_seed: int
- :ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
- the range [0, 1].
- :vartype step_lr_gamma: float
- :ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
- positive integer.
- :vartype step_lr_step_size: int
- :ivar training_batch_size: Training batch size. Must be a positive integer.
- :vartype training_batch_size: int
- :ivar validation_batch_size: Validation batch size. Must be a positive integer.
- :vartype validation_batch_size: int
- :ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
- 'warmup_cosine'. Must be a float in the range [0, 1].
- :vartype warmup_cosine_lr_cycles: float
- :ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
- 'warmup_cosine'. Must be a positive integer.
- :vartype warmup_cosine_lr_warmup_epochs: int
- :ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
- a float in the range[0, 1].
- :vartype weight_decay: float
- :ivar box_detections_per_image: Maximum number of detections per image, for all classes. Must
- be a positive integer.
- Note: This settings is not supported for the 'yolov5' algorithm.
- :vartype box_detections_per_image: int
- :ivar box_score_threshold: During inference, only return proposals with a classification score
- greater than
- BoxScoreThreshold. Must be a float in the range[0, 1].
- :vartype box_score_threshold: float
- :ivar image_size: Image size for train and validation. Must be a positive integer.
- Note: The training run may get into CUDA OOM if the size is too big.
- Note: This settings is only supported for the 'yolov5' algorithm.
- :vartype image_size: int
- :ivar max_size: Maximum size of the image to be rescaled before feeding it to the backbone.
- Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
- Note: This settings is not supported for the 'yolov5' algorithm.
- :vartype max_size: int
- :ivar min_size: Minimum size of the image to be rescaled before feeding it to the backbone.
- Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
- Note: This settings is not supported for the 'yolov5' algorithm.
- :vartype min_size: int
- :ivar model_size: Model size. Must be 'small', 'medium', 'large', or 'xlarge'.
- Note: training run may get into CUDA OOM if the model size is too big.
- Note: This settings is only supported for the 'yolov5' algorithm. Known values are: "None",
- "Small", "Medium", "Large", and "ExtraLarge".
- :vartype model_size: str or ~azure.mgmt.machinelearningservices.models.ModelSize
- :ivar multi_scale: Enable multi-scale image by varying image size by +/- 50%.
- Note: training run may get into CUDA OOM if no sufficient GPU memory.
- Note: This settings is only supported for the 'yolov5' algorithm.
- :vartype multi_scale: bool
- :ivar nms_iou_threshold: IOU threshold used during inference in NMS post processing. Must be a
- float in the range [0, 1].
- :vartype nms_iou_threshold: float
- :ivar tile_grid_size: The grid size to use for tiling each image. Note: TileGridSize must not
- be
- None to enable small object detection logic. A string containing two integers in mxn format.
- Note: This settings is not supported for the 'yolov5' algorithm.
- :vartype tile_grid_size: str
- :ivar tile_overlap_ratio: Overlap ratio between adjacent tiles in each dimension. Must be float
- in the range [0, 1).
- Note: This settings is not supported for the 'yolov5' algorithm.
- :vartype tile_overlap_ratio: float
- :ivar tile_predictions_nms_threshold: The IOU threshold to use to perform NMS while merging
- predictions from tiles and image.
- Used in validation/ inference. Must be float in the range [0, 1].
- Note: This settings is not supported for the 'yolov5' algorithm.
- :vartype tile_predictions_nms_threshold: float
- :ivar validation_iou_threshold: IOU threshold to use when computing validation metric. Must be
- float in the range [0, 1].
- :vartype validation_iou_threshold: float
- :ivar validation_metric_type: Metric computation method to use for validation metrics. Known
- values are: "None", "Coco", "Voc", and "CocoVoc".
- :vartype validation_metric_type: str or
- ~azure.mgmt.machinelearningservices.models.ValidationMetricType
+ :ivar next_link: The link to the next page of ModelContainer objects. If null, there are no
+ additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type ModelContainer.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.ModelContainer]
+ """
+
+ _attribute_map = {
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[ModelContainer]"},
+ }
+
+ def __init__(
+ self, *, next_link: Optional[str] = None, value: Optional[List["_models.ModelContainer"]] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword next_link: The link to the next page of ModelContainer objects. If null, there are no
+ additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type ModelContainer.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.ModelContainer]
+ """
+ super().__init__(**kwargs)
+ self.next_link = next_link
+ self.value = value
+
+
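The paginated wrapper is normally consumed through the generated pager, but it can be built and walked directly. A small sketch under that assumption:

    # Hedged sketch: the shape the list operations page through. In practice the
    # client returns an ItemPaged that follows next_link automatically; this only
    # shows the two fields the pager relies on.
    from azure.mgmt.machinelearningservices import models as _models

    page = _models.ModelContainerResourceArmPaginatedResult(
        value=[
            _models.ModelContainer(
                properties=_models.ModelContainerProperties(description="first container")
            )
        ],
        next_link=None,  # None means there are no further pages
    )
    for item in page.value or []:
        print(item.properties.description)
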
+class ModelPackageInput(_serialization.Model):
+ """Model package input options.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar input_type: [Required] Type of the input included in the target image. Required. Known
+ values are: "UriFile" and "UriFolder".
+ :vartype input_type: str or ~azure.mgmt.machinelearningservices.models.PackageInputType
+ :ivar mode: Input delivery mode of the input. Known values are: "Copy" and "Download".
+ :vartype mode: str or ~azure.mgmt.machinelearningservices.models.PackageInputDeliveryMode
+ :ivar mount_path: Relative mount path of the input in the target image.
+ :vartype mount_path: str
+ :ivar path: [Required] Location of the input. Required.
+ :vartype path: ~azure.mgmt.machinelearningservices.models.PackageInputPathBase
"""
+ _validation = {
+ "input_type": {"required": True},
+ "path": {"required": True},
+ }
+
_attribute_map = {
- "advanced_settings": {"key": "advancedSettings", "type": "str"},
- "ams_gradient": {"key": "amsGradient", "type": "bool"},
- "augmentations": {"key": "augmentations", "type": "str"},
- "beta1": {"key": "beta1", "type": "float"},
- "beta2": {"key": "beta2", "type": "float"},
- "checkpoint_frequency": {"key": "checkpointFrequency", "type": "int"},
- "checkpoint_model": {"key": "checkpointModel", "type": "MLFlowModelJobInput"},
- "checkpoint_run_id": {"key": "checkpointRunId", "type": "str"},
- "distributed": {"key": "distributed", "type": "bool"},
- "early_stopping": {"key": "earlyStopping", "type": "bool"},
- "early_stopping_delay": {"key": "earlyStoppingDelay", "type": "int"},
- "early_stopping_patience": {"key": "earlyStoppingPatience", "type": "int"},
- "enable_onnx_normalization": {"key": "enableOnnxNormalization", "type": "bool"},
- "evaluation_frequency": {"key": "evaluationFrequency", "type": "int"},
- "gradient_accumulation_step": {"key": "gradientAccumulationStep", "type": "int"},
- "layers_to_freeze": {"key": "layersToFreeze", "type": "int"},
- "learning_rate": {"key": "learningRate", "type": "float"},
- "learning_rate_scheduler": {"key": "learningRateScheduler", "type": "str"},
- "model_name": {"key": "modelName", "type": "str"},
- "momentum": {"key": "momentum", "type": "float"},
- "nesterov": {"key": "nesterov", "type": "bool"},
- "number_of_epochs": {"key": "numberOfEpochs", "type": "int"},
- "number_of_workers": {"key": "numberOfWorkers", "type": "int"},
- "optimizer": {"key": "optimizer", "type": "str"},
- "random_seed": {"key": "randomSeed", "type": "int"},
- "step_lr_gamma": {"key": "stepLRGamma", "type": "float"},
- "step_lr_step_size": {"key": "stepLRStepSize", "type": "int"},
- "training_batch_size": {"key": "trainingBatchSize", "type": "int"},
- "validation_batch_size": {"key": "validationBatchSize", "type": "int"},
- "warmup_cosine_lr_cycles": {"key": "warmupCosineLRCycles", "type": "float"},
- "warmup_cosine_lr_warmup_epochs": {"key": "warmupCosineLRWarmupEpochs", "type": "int"},
- "weight_decay": {"key": "weightDecay", "type": "float"},
- "box_detections_per_image": {"key": "boxDetectionsPerImage", "type": "int"},
- "box_score_threshold": {"key": "boxScoreThreshold", "type": "float"},
- "image_size": {"key": "imageSize", "type": "int"},
- "max_size": {"key": "maxSize", "type": "int"},
- "min_size": {"key": "minSize", "type": "int"},
- "model_size": {"key": "modelSize", "type": "str"},
- "multi_scale": {"key": "multiScale", "type": "bool"},
- "nms_iou_threshold": {"key": "nmsIouThreshold", "type": "float"},
- "tile_grid_size": {"key": "tileGridSize", "type": "str"},
- "tile_overlap_ratio": {"key": "tileOverlapRatio", "type": "float"},
- "tile_predictions_nms_threshold": {"key": "tilePredictionsNmsThreshold", "type": "float"},
- "validation_iou_threshold": {"key": "validationIouThreshold", "type": "float"},
- "validation_metric_type": {"key": "validationMetricType", "type": "str"},
+ "input_type": {"key": "inputType", "type": "str"},
+ "mode": {"key": "mode", "type": "str"},
+ "mount_path": {"key": "mountPath", "type": "str"},
+ "path": {"key": "path", "type": "PackageInputPathBase"},
}
- def __init__( # pylint: disable=too-many-locals
+ def __init__(
self,
*,
- advanced_settings: Optional[str] = None,
- ams_gradient: Optional[bool] = None,
- augmentations: Optional[str] = None,
- beta1: Optional[float] = None,
- beta2: Optional[float] = None,
- checkpoint_frequency: Optional[int] = None,
- checkpoint_model: Optional["_models.MLFlowModelJobInput"] = None,
- checkpoint_run_id: Optional[str] = None,
- distributed: Optional[bool] = None,
- early_stopping: Optional[bool] = None,
- early_stopping_delay: Optional[int] = None,
- early_stopping_patience: Optional[int] = None,
- enable_onnx_normalization: Optional[bool] = None,
- evaluation_frequency: Optional[int] = None,
- gradient_accumulation_step: Optional[int] = None,
- layers_to_freeze: Optional[int] = None,
- learning_rate: Optional[float] = None,
- learning_rate_scheduler: Optional[Union[str, "_models.LearningRateScheduler"]] = None,
- model_name: Optional[str] = None,
- momentum: Optional[float] = None,
- nesterov: Optional[bool] = None,
- number_of_epochs: Optional[int] = None,
- number_of_workers: Optional[int] = None,
- optimizer: Optional[Union[str, "_models.StochasticOptimizer"]] = None,
- random_seed: Optional[int] = None,
- step_lr_gamma: Optional[float] = None,
- step_lr_step_size: Optional[int] = None,
- training_batch_size: Optional[int] = None,
- validation_batch_size: Optional[int] = None,
- warmup_cosine_lr_cycles: Optional[float] = None,
- warmup_cosine_lr_warmup_epochs: Optional[int] = None,
- weight_decay: Optional[float] = None,
- box_detections_per_image: Optional[int] = None,
- box_score_threshold: Optional[float] = None,
- image_size: Optional[int] = None,
- max_size: Optional[int] = None,
- min_size: Optional[int] = None,
- model_size: Optional[Union[str, "_models.ModelSize"]] = None,
- multi_scale: Optional[bool] = None,
- nms_iou_threshold: Optional[float] = None,
- tile_grid_size: Optional[str] = None,
- tile_overlap_ratio: Optional[float] = None,
- tile_predictions_nms_threshold: Optional[float] = None,
- validation_iou_threshold: Optional[float] = None,
- validation_metric_type: Optional[Union[str, "_models.ValidationMetricType"]] = None,
+ input_type: Union[str, "_models.PackageInputType"],
+ path: "_models.PackageInputPathBase",
+ mode: Optional[Union[str, "_models.PackageInputDeliveryMode"]] = None,
+ mount_path: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword advanced_settings: Settings for advanced scenarios.
- :paramtype advanced_settings: str
- :keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
- :paramtype ams_gradient: bool
- :keyword augmentations: Settings for using Augmentations.
- :paramtype augmentations: str
- :keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
- range [0, 1].
- :paramtype beta1: float
- :keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
- range [0, 1].
- :paramtype beta2: float
- :keyword checkpoint_frequency: Frequency to store model checkpoints. Must be a positive
- integer.
- :paramtype checkpoint_frequency: int
- :keyword checkpoint_model: The pretrained checkpoint model for incremental training.
- :paramtype checkpoint_model: ~azure.mgmt.machinelearningservices.models.MLFlowModelJobInput
- :keyword checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
- incremental training.
- :paramtype checkpoint_run_id: str
- :keyword distributed: Whether to use distributed training.
- :paramtype distributed: bool
- :keyword early_stopping: Enable early stopping logic during training.
- :paramtype early_stopping: bool
- :keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
- before primary metric improvement
- is tracked for early stopping. Must be a positive integer.
- :paramtype early_stopping_delay: int
- :keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
- primary metric improvement before
- the run is stopped. Must be a positive integer.
- :paramtype early_stopping_patience: int
- :keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
- :paramtype enable_onnx_normalization: bool
- :keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
- Must be a positive integer.
- :paramtype evaluation_frequency: int
- :keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
- "GradAccumulationStep" steps without
- updating the model weights while accumulating the gradients of those steps, and then using
- the accumulated gradients to compute the weight updates. Must be a positive integer.
- :paramtype gradient_accumulation_step: int
- :keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
- integer.
- For instance, passing 2 as value for 'seresnext' means
- freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
- please
- see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :paramtype layers_to_freeze: int
- :keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
- :paramtype learning_rate: float
- :keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
- 'step'. Known values are: "None", "WarmupCosine", and "Step".
- :paramtype learning_rate_scheduler: str or
- ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
- :keyword model_name: Name of the model to use for training.
- For more information on the available models please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :paramtype model_name: str
- :keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
- 1].
- :paramtype momentum: float
- :keyword nesterov: Enable nesterov when optimizer is 'sgd'.
- :paramtype nesterov: bool
- :keyword number_of_epochs: Number of training epochs. Must be a positive integer.
- :paramtype number_of_epochs: int
- :keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
- :paramtype number_of_workers: int
- :keyword optimizer: Type of optimizer. Known values are: "None", "Sgd", "Adam", and "Adamw".
- :paramtype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
- :keyword random_seed: Random seed to be used when using deterministic training.
- :paramtype random_seed: int
- :keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
- in the range [0, 1].
- :paramtype step_lr_gamma: float
- :keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
- a positive integer.
- :paramtype step_lr_step_size: int
- :keyword training_batch_size: Training batch size. Must be a positive integer.
- :paramtype training_batch_size: int
- :keyword validation_batch_size: Validation batch size. Must be a positive integer.
- :paramtype validation_batch_size: int
- :keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
- 'warmup_cosine'. Must be a float in the range [0, 1].
- :paramtype warmup_cosine_lr_cycles: float
- :keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
- 'warmup_cosine'. Must be a positive integer.
- :paramtype warmup_cosine_lr_warmup_epochs: int
- :keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
- be a float in the range[0, 1].
- :paramtype weight_decay: float
- :keyword box_detections_per_image: Maximum number of detections per image, for all classes.
- Must be a positive integer.
- Note: This settings is not supported for the 'yolov5' algorithm.
- :paramtype box_detections_per_image: int
- :keyword box_score_threshold: During inference, only return proposals with a classification
- score greater than
- BoxScoreThreshold. Must be a float in the range[0, 1].
- :paramtype box_score_threshold: float
- :keyword image_size: Image size for train and validation. Must be a positive integer.
- Note: The training run may get into CUDA OOM if the size is too big.
- Note: This settings is only supported for the 'yolov5' algorithm.
- :paramtype image_size: int
- :keyword max_size: Maximum size of the image to be rescaled before feeding it to the backbone.
- Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
- Note: This settings is not supported for the 'yolov5' algorithm.
- :paramtype max_size: int
- :keyword min_size: Minimum size of the image to be rescaled before feeding it to the backbone.
- Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
- Note: This settings is not supported for the 'yolov5' algorithm.
- :paramtype min_size: int
- :keyword model_size: Model size. Must be 'small', 'medium', 'large', or 'xlarge'.
- Note: training run may get into CUDA OOM if the model size is too big.
- Note: This settings is only supported for the 'yolov5' algorithm. Known values are: "None",
- "Small", "Medium", "Large", and "ExtraLarge".
- :paramtype model_size: str or ~azure.mgmt.machinelearningservices.models.ModelSize
- :keyword multi_scale: Enable multi-scale image by varying image size by +/- 50%.
- Note: training run may get into CUDA OOM if no sufficient GPU memory.
- Note: This settings is only supported for the 'yolov5' algorithm.
- :paramtype multi_scale: bool
- :keyword nms_iou_threshold: IOU threshold used during inference in NMS post processing. Must be
- a float in the range [0, 1].
- :paramtype nms_iou_threshold: float
- :keyword tile_grid_size: The grid size to use for tiling each image. Note: TileGridSize must
- not be
- None to enable small object detection logic. A string containing two integers in mxn format.
- Note: This settings is not supported for the 'yolov5' algorithm.
- :paramtype tile_grid_size: str
- :keyword tile_overlap_ratio: Overlap ratio between adjacent tiles in each dimension. Must be
- float in the range [0, 1).
- Note: This settings is not supported for the 'yolov5' algorithm.
- :paramtype tile_overlap_ratio: float
- :keyword tile_predictions_nms_threshold: The IOU threshold to use to perform NMS while merging
- predictions from tiles and image.
- Used in validation/ inference. Must be float in the range [0, 1].
- Note: This settings is not supported for the 'yolov5' algorithm.
- :paramtype tile_predictions_nms_threshold: float
- :keyword validation_iou_threshold: IOU threshold to use when computing validation metric. Must
- be float in the range [0, 1].
- :paramtype validation_iou_threshold: float
- :keyword validation_metric_type: Metric computation method to use for validation metrics. Known
- values are: "None", "Coco", "Voc", and "CocoVoc".
- :paramtype validation_metric_type: str or
- ~azure.mgmt.machinelearningservices.models.ValidationMetricType
+ :keyword input_type: [Required] Type of the input included in the target image. Required. Known
+ values are: "UriFile" and "UriFolder".
+ :paramtype input_type: str or ~azure.mgmt.machinelearningservices.models.PackageInputType
+ :keyword mode: Input delivery mode of the input. Known values are: "Copy" and "Download".
+ :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.PackageInputDeliveryMode
+ :keyword mount_path: Relative mount path of the input in the target image.
+ :paramtype mount_path: str
+ :keyword path: [Required] Location of the input. Required.
+ :paramtype path: ~azure.mgmt.machinelearningservices.models.PackageInputPathBase
"""
- super().__init__(
- advanced_settings=advanced_settings,
- ams_gradient=ams_gradient,
- augmentations=augmentations,
- beta1=beta1,
- beta2=beta2,
- checkpoint_frequency=checkpoint_frequency,
- checkpoint_model=checkpoint_model,
- checkpoint_run_id=checkpoint_run_id,
- distributed=distributed,
- early_stopping=early_stopping,
- early_stopping_delay=early_stopping_delay,
- early_stopping_patience=early_stopping_patience,
- enable_onnx_normalization=enable_onnx_normalization,
- evaluation_frequency=evaluation_frequency,
- gradient_accumulation_step=gradient_accumulation_step,
- layers_to_freeze=layers_to_freeze,
- learning_rate=learning_rate,
- learning_rate_scheduler=learning_rate_scheduler,
- model_name=model_name,
- momentum=momentum,
- nesterov=nesterov,
- number_of_epochs=number_of_epochs,
- number_of_workers=number_of_workers,
- optimizer=optimizer,
- random_seed=random_seed,
- step_lr_gamma=step_lr_gamma,
- step_lr_step_size=step_lr_step_size,
- training_batch_size=training_batch_size,
- validation_batch_size=validation_batch_size,
- warmup_cosine_lr_cycles=warmup_cosine_lr_cycles,
- warmup_cosine_lr_warmup_epochs=warmup_cosine_lr_warmup_epochs,
- weight_decay=weight_decay,
- **kwargs
- )
- self.box_detections_per_image = box_detections_per_image
- self.box_score_threshold = box_score_threshold
- self.image_size = image_size
- self.max_size = max_size
- self.min_size = min_size
- self.model_size = model_size
- self.multi_scale = multi_scale
- self.nms_iou_threshold = nms_iou_threshold
- self.tile_grid_size = tile_grid_size
- self.tile_overlap_ratio = tile_overlap_ratio
- self.tile_predictions_nms_threshold = tile_predictions_nms_threshold
- self.validation_iou_threshold = validation_iou_threshold
- self.validation_metric_type = validation_metric_type
+ super().__init__(**kwargs)
+ self.input_type = input_type
+ self.mode = mode
+ self.mount_path = mount_path
+ self.path = path
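
A hedged construction sketch for ModelPackageInput. The enum-typed fields also accept their string values; `path` needs a concrete PackageInputPathBase subtype, and the PackageInputPathUrl name and its `url` keyword below are assumptions not confirmed by this diff:

    # Hedged sketch: declare an extra input to bake into a packaged model image.
    from azure.mgmt.machinelearningservices import models as _models

    package_input = _models.ModelPackageInput(
        input_type="UriFile",  # string form of PackageInputType; the enum member also works
        path=_models.PackageInputPathUrl(  # assumed subtype and keyword; not confirmed by this diff
            url="azureml://datastores/workspaceblobstore/paths/configs/scoring.json"
        ),
        mode="Download",       # known values per the docstring above: "Copy" and "Download"
        mount_path="/var/azureml-app/configs/scoring.json",
    )
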
-class ImageObjectDetection(ImageObjectDetectionBase, AutoMLVertical): # pylint: disable=too-many-instance-attributes
- """Image Object Detection. Object detection is used to identify objects in an image and locate
- each object with a
- bounding box e.g. locate all dogs and cats in an image and draw a bounding box around each.
+class ModelPerformanceSignal(MonitoringSignalBase):
+ """Model performance signal definition.
All required parameters must be populated in order to send to Azure.
- :ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
- "Warning", "Error", and "Critical".
- :vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
- :ivar target_column_name: Target column name: This is prediction values column.
- Also known as label column name in context of classification tasks.
- :vartype target_column_name: str
- :ivar task_type: [Required] Task type for AutoMLJob. Required. Known values are:
- "Classification", "Regression", "Forecasting", "ImageClassification",
- "ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
- "TextClassification", "TextClassificationMultilabel", and "TextNER".
- :vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
- :ivar training_data: [Required] Training data input. Required.
- :vartype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
- :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
- :ivar validation_data: Validation data inputs.
- :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :vartype validation_data_size: float
- :ivar model_settings: Settings used for training the model.
- :vartype model_settings:
- ~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
- :ivar search_space: Search space for sampling different combinations of models and their
- hyperparameters.
- :vartype search_space:
- list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
- :ivar primary_metric: Primary metric to optimize for this task. "MeanAveragePrecision"
- :vartype primary_metric: str or
- ~azure.mgmt.machinelearningservices.models.ObjectDetectionPrimaryMetrics
+ :ivar notification_types: The current notification mode for this signal.
+ :vartype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
+ :vartype properties: dict[str, str]
+ :ivar signal_type: [Required] Specifies the type of signal to monitor. Required. Known values
+ are: "DataDrift", "PredictionDrift", "DataQuality", "FeatureAttributionDrift", "Custom",
+ "ModelPerformance", "GenerationSafetyQuality", and "GenerationTokenStatistics".
+ :vartype signal_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringSignalType
+ :ivar data_segment: The data segment.
+ :vartype data_segment: ~azure.mgmt.machinelearningservices.models.MonitoringDataSegment
+ :ivar metric_threshold: [Required] A list of metrics to calculate and their associated
+ thresholds. Required.
+ :vartype metric_threshold:
+ ~azure.mgmt.machinelearningservices.models.ModelPerformanceMetricThresholdBase
+ :ivar production_data: [Required] The data produced by the production service for which
+ performance will be calculated. Required.
+ :vartype production_data:
+ list[~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase]
+ :ivar reference_data: [Required] The reference data used as the basis to calculate model
+ performance. Required.
+ :vartype reference_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
+ """
+
+ _validation = {
+ "signal_type": {"required": True},
+ "metric_threshold": {"required": True},
+ "production_data": {"required": True},
+ "reference_data": {"required": True},
+ }
+
+ _attribute_map = {
+ "notification_types": {"key": "notificationTypes", "type": "[str]"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "signal_type": {"key": "signalType", "type": "str"},
+ "data_segment": {"key": "dataSegment", "type": "MonitoringDataSegment"},
+ "metric_threshold": {"key": "metricThreshold", "type": "ModelPerformanceMetricThresholdBase"},
+ "production_data": {"key": "productionData", "type": "[MonitoringInputDataBase]"},
+ "reference_data": {"key": "referenceData", "type": "MonitoringInputDataBase"},
+ }
+
+ def __init__(
+ self,
+ *,
+ metric_threshold: "_models.ModelPerformanceMetricThresholdBase",
+ production_data: List["_models.MonitoringInputDataBase"],
+ reference_data: "_models.MonitoringInputDataBase",
+ notification_types: Optional[List[Union[str, "_models.MonitoringNotificationType"]]] = None,
+ properties: Optional[Dict[str, str]] = None,
+ data_segment: Optional["_models.MonitoringDataSegment"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword notification_types: The current notification mode for this signal.
+ :paramtype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
+ :paramtype properties: dict[str, str]
+ :keyword data_segment: The data segment.
+ :paramtype data_segment: ~azure.mgmt.machinelearningservices.models.MonitoringDataSegment
+ :keyword metric_threshold: [Required] A list of metrics to calculate and their associated
+ thresholds. Required.
+ :paramtype metric_threshold:
+ ~azure.mgmt.machinelearningservices.models.ModelPerformanceMetricThresholdBase
+ :keyword production_data: [Required] The data produced by the production service for which
+ performance will be calculated. Required.
+ :paramtype production_data:
+ list[~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase]
+ :keyword reference_data: [Required] The reference data used as the basis to calculate model
+ performance. Required.
+ :paramtype reference_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
+ """
+ super().__init__(notification_types=notification_types, properties=properties, **kwargs)
+ self.signal_type: str = "ModelPerformance"
+ self.data_segment = data_segment
+ self.metric_threshold = metric_threshold
+ self.production_data = production_data
+ self.reference_data = reference_data
+
+
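The _attribute_map above fixes the REST casing of a model-performance signal. A sketch of the resulting JSON shape; the nested polymorphic payloads are left as empty placeholders:

    # Sketch of the JSON shape implied by ModelPerformanceSignal._attribute_map above.
    # The empty dicts stand in for the polymorphic payloads defined by other models.
    model_performance_signal = {
        "signalType": "ModelPerformance",  # discriminator written by the model's __init__
        "metricThreshold": {},             # a ModelPerformanceMetricThresholdBase subtype payload
        "productionData": [{}],            # MonitoringInputDataBase subtype payloads
        "referenceData": {},               # MonitoringInputDataBase subtype payload
        "dataSegment": None,               # optional MonitoringDataSegment
        "notificationTypes": None,         # optional list of MonitoringNotificationType values
        "properties": {"owner": "ml-platform"},
    }
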
+class ModelVersion(ProxyResource):
+ """Azure Resource Manager resource envelope.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.ModelVersionProperties
+ """
+
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "properties": {"required": True},
+ }
+
+ _attribute_map = {
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "properties": {"key": "properties", "type": "ModelVersionProperties"},
+ }
+
+ def __init__(self, *, properties: "_models.ModelVersionProperties", **kwargs: Any) -> None:
+ """
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.ModelVersionProperties
+ """
+ super().__init__(**kwargs)
+ self.properties = properties
+
+
+class ModelVersionProperties(AssetBase): # pylint: disable=too-many-instance-attributes
+ """Model asset version details.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar auto_delete_setting: Specifies the lifecycle setting of managed data asset.
+ :vartype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :ivar is_anonymous: If the name and version are system generated (anonymous registration). For
+ types where Stage is defined, when Stage is provided it will be used to populate IsAnonymous.
+ :vartype is_anonymous: bool
+ :ivar is_archived: Is the asset archived? For types where Stage is defined, when Stage is
+ provided it will be used to populate IsArchived.
+ :vartype is_archived: bool
+ :ivar flavors: Mapping of model flavors to their properties.
+ :vartype flavors: dict[str, ~azure.mgmt.machinelearningservices.models.FlavorData]
+ :ivar intellectual_property: Intellectual Property details. Used if model is an Intellectual
+ Property.
+ :vartype intellectual_property: ~azure.mgmt.machinelearningservices.models.IntellectualProperty
+ :ivar job_name: Name of the training job which produced this model.
+ :vartype job_name: str
+ :ivar model_type: The storage format for this entity. Used for no-code deployment (NCD).
+ :vartype model_type: str
+ :ivar model_uri: The URI path to the model contents.
+ :vartype model_uri: str
+ :ivar provisioning_state: Provisioning state for the model version. Known values are:
+ "Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.AssetProvisioningState
+ :ivar stage: Stage in the model lifecycle assigned to this model.
+ :vartype stage: str
"""
- _validation = {
- "task_type": {"required": True},
- "training_data": {"required": True},
- "limit_settings": {"required": True},
- }
+ _validation = {
+ "provisioning_state": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "auto_delete_setting": {"key": "autoDeleteSetting", "type": "AutoDeleteSetting"},
+ "is_anonymous": {"key": "isAnonymous", "type": "bool"},
+ "is_archived": {"key": "isArchived", "type": "bool"},
+ "flavors": {"key": "flavors", "type": "{FlavorData}"},
+ "intellectual_property": {"key": "intellectualProperty", "type": "IntellectualProperty"},
+ "job_name": {"key": "jobName", "type": "str"},
+ "model_type": {"key": "modelType", "type": "str"},
+ "model_uri": {"key": "modelUri", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "stage": {"key": "stage", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ auto_delete_setting: Optional["_models.AutoDeleteSetting"] = None,
+ is_anonymous: bool = False,
+ is_archived: bool = False,
+ flavors: Optional[Dict[str, "_models.FlavorData"]] = None,
+ intellectual_property: Optional["_models.IntellectualProperty"] = None,
+ job_name: Optional[str] = None,
+ model_type: Optional[str] = None,
+ model_uri: Optional[str] = None,
+ stage: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword auto_delete_setting: Specifies the lifecycle setting of managed data asset.
+ :paramtype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :keyword is_anonymous: If the name and version are system generated (anonymous registration). For
+ types where Stage is defined, when Stage is provided it will be used to populate IsAnonymous.
+ :paramtype is_anonymous: bool
+ :keyword is_archived: Is the asset archived? For types where Stage is defined, when Stage is
+ provided it will be used to populate IsArchived.
+ :paramtype is_archived: bool
+ :keyword flavors: Mapping of model flavors to their properties.
+ :paramtype flavors: dict[str, ~azure.mgmt.machinelearningservices.models.FlavorData]
+ :keyword intellectual_property: Intellectual Property details. Used if model is an Intellectual
+ Property.
+ :paramtype intellectual_property:
+ ~azure.mgmt.machinelearningservices.models.IntellectualProperty
+ :keyword job_name: Name of the training job which produced this model.
+ :paramtype job_name: str
+ :keyword model_type: The storage format for this entity. Used for no-code deployment (NCD).
+ :paramtype model_type: str
+ :keyword model_uri: The URI path to the model contents.
+ :paramtype model_uri: str
+ :keyword stage: Stage in the model lifecycle assigned to this model.
+ :paramtype stage: str
+ """
+ super().__init__(
+ description=description,
+ properties=properties,
+ tags=tags,
+ auto_delete_setting=auto_delete_setting,
+ is_anonymous=is_anonymous,
+ is_archived=is_archived,
+ **kwargs
+ )
+ self.flavors = flavors
+ self.intellectual_property = intellectual_property
+ self.job_name = job_name
+ self.model_type = model_type
+ self.model_uri = model_uri
+ self.provisioning_state = None
+ self.stage = stage
+
+
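ModelVersionProperties describes one concrete version of a model, and ModelVersion wraps it for the ARM call. A hedged sketch; the `model_versions.create_or_update` operation name and its parameters are assumed from the usual generated pattern:

    # Hedged sketch: describe one version of a model and wrap it in the ARM envelope.
    from azure.mgmt.machinelearningservices import models as _models

    version_props = _models.ModelVersionProperties(
        description="GBM trained on the 2023-08 snapshot",
        model_type="mlflow_model",  # storage format; commonly used values, not enumerated in this diff
        model_uri="azureml://datastores/workspaceblobstore/paths/models/credit-default/1/",
        job_name="train-credit-default-42",
        stage="Production",
        tags={"framework": "lightgbm"},
    )
    body = _models.ModelVersion(properties=version_props)

    # Assumed operation (standard generated pattern, not shown in this diff):
    # client.model_versions.create_or_update(
    #     resource_group_name="<resource-group>",
    #     workspace_name="<workspace>",
    #     name="credit-default-model",
    #     version="1",
    #     body=body,
    # )
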
+class ModelVersionResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of ModelVersion entities.
+
+ :ivar next_link: The link to the next page of ModelVersion objects. If null, there are no
+ additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type ModelVersion.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.ModelVersion]
+ """
_attribute_map = {
- "log_verbosity": {"key": "logVerbosity", "type": "str"},
- "target_column_name": {"key": "targetColumnName", "type": "str"},
- "task_type": {"key": "taskType", "type": "str"},
- "training_data": {"key": "trainingData", "type": "MLTableJobInput"},
- "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
- "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
- "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
- "validation_data_size": {"key": "validationDataSize", "type": "float"},
- "model_settings": {"key": "modelSettings", "type": "ImageModelSettingsObjectDetection"},
- "search_space": {"key": "searchSpace", "type": "[ImageModelDistributionSettingsObjectDetection]"},
- "primary_metric": {"key": "primaryMetric", "type": "str"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[ModelVersion]"},
}
def __init__(
- self,
- *,
- training_data: "_models.MLTableJobInput",
- limit_settings: "_models.ImageLimitSettings",
- log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
- target_column_name: Optional[str] = None,
- sweep_settings: Optional["_models.ImageSweepSettings"] = None,
- validation_data: Optional["_models.MLTableJobInput"] = None,
- validation_data_size: Optional[float] = None,
- model_settings: Optional["_models.ImageModelSettingsObjectDetection"] = None,
- search_space: Optional[List["_models.ImageModelDistributionSettingsObjectDetection"]] = None,
- primary_metric: Optional[Union[str, "_models.ObjectDetectionPrimaryMetrics"]] = None,
- **kwargs: Any
+ self, *, next_link: Optional[str] = None, value: Optional[List["_models.ModelVersion"]] = None, **kwargs: Any
) -> None:
"""
- :keyword log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
- "Warning", "Error", and "Critical".
- :paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
- :keyword target_column_name: Target column name: This is prediction values column.
- Also known as label column name in context of classification tasks.
- :paramtype target_column_name: str
- :keyword training_data: [Required] Training data input. Required.
- :paramtype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
- :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
- :keyword validation_data: Validation data inputs.
- :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :paramtype validation_data_size: float
- :keyword model_settings: Settings used for training the model.
- :paramtype model_settings:
- ~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
- :keyword search_space: Search space for sampling different combinations of models and their
- hyperparameters.
- :paramtype search_space:
- list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
- :keyword primary_metric: Primary metric to optimize for this task. "MeanAveragePrecision"
- :paramtype primary_metric: str or
- ~azure.mgmt.machinelearningservices.models.ObjectDetectionPrimaryMetrics
+ :keyword next_link: The link to the next page of ModelVersion objects. If null, there are no
+ additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type ModelVersion.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.ModelVersion]
"""
- super().__init__(
- limit_settings=limit_settings,
- sweep_settings=sweep_settings,
- validation_data=validation_data,
- validation_data_size=validation_data_size,
- model_settings=model_settings,
- search_space=search_space,
- log_verbosity=log_verbosity,
- target_column_name=target_column_name,
- training_data=training_data,
- **kwargs
- )
- self.log_verbosity = log_verbosity
- self.target_column_name = target_column_name
- self.task_type: str = "ImageObjectDetection"
- self.training_data = training_data
- self.primary_metric = primary_metric
- self.limit_settings = limit_settings
- self.sweep_settings = sweep_settings
- self.validation_data = validation_data
- self.validation_data_size = validation_data_size
- self.model_settings = model_settings
- self.search_space = search_space
+ super().__init__(**kwargs)
+ self.next_link = next_link
+ self.value = value
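
Callers rarely touch this wrapper directly: the generated list operation (name assumed here) returns an ItemPaged that follows next_link automatically. A hedged sketch:

    # Hedged sketch: enumerate every registered version under a model container.
    # model_versions.list and its parameter names are assumed from the generated pattern.
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

    client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")

    for version in client.model_versions.list(
        resource_group_name="<resource-group>",
        workspace_name="<workspace>",
        name="credit-default-model",
    ):
        # Each item is a ModelVersion; pagination (next_link) is handled by the pager.
        print(version.name, version.properties.model_uri)
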
-class ImageSweepSettings(_serialization.Model):
- """Model sweeping and hyperparameter sweeping related settings.
+class MonitorComputeConfigurationBase(_serialization.Model):
+ """Monitor compute configuration base definition.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ MonitorServerlessSparkCompute
All required parameters must be populated in order to send to Azure.
- :ivar early_termination: Type of early termination policy.
- :vartype early_termination: ~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicy
- :ivar sampling_algorithm: [Required] Type of the hyperparameter sampling algorithms. Required.
- Known values are: "Grid", "Random", and "Bayesian".
- :vartype sampling_algorithm: str or
- ~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
+ :ivar compute_type: [Required] Specifies the type of compute used for monitoring. Required.
+ "ServerlessSpark"
+ :vartype compute_type: str or ~azure.mgmt.machinelearningservices.models.MonitorComputeType
"""
_validation = {
- "sampling_algorithm": {"required": True},
+ "compute_type": {"required": True},
}
_attribute_map = {
- "early_termination": {"key": "earlyTermination", "type": "EarlyTerminationPolicy"},
- "sampling_algorithm": {"key": "samplingAlgorithm", "type": "str"},
+ "compute_type": {"key": "computeType", "type": "str"},
}
- def __init__(
- self,
- *,
- sampling_algorithm: Union[str, "_models.SamplingAlgorithmType"],
- early_termination: Optional["_models.EarlyTerminationPolicy"] = None,
- **kwargs: Any
- ) -> None:
- """
- :keyword early_termination: Type of early termination policy.
- :paramtype early_termination: ~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicy
- :keyword sampling_algorithm: [Required] Type of the hyperparameter sampling algorithms.
- Required. Known values are: "Grid", "Random", and "Bayesian".
- :paramtype sampling_algorithm: str or
- ~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
- """
+ _subtype_map = {"compute_type": {"ServerlessSpark": "MonitorServerlessSparkCompute"}}
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
super().__init__(**kwargs)
- self.early_termination = early_termination
- self.sampling_algorithm = sampling_algorithm
+ self.compute_type: Optional[str] = None
-class InferenceContainerProperties(_serialization.Model):
- """InferenceContainerProperties.
+class MonitorDefinition(_serialization.Model):
+ """MonitorDefinition.
- :ivar liveness_route: The route to check the liveness of the inference server container.
- :vartype liveness_route: ~azure.mgmt.machinelearningservices.models.Route
- :ivar readiness_route: The route to check the readiness of the inference server container.
- :vartype readiness_route: ~azure.mgmt.machinelearningservices.models.Route
- :ivar scoring_route: The port to send the scoring requests to, within the inference server
- container.
- :vartype scoring_route: ~azure.mgmt.machinelearningservices.models.Route
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar alert_notification_settings: The monitor's notification settings.
+ :vartype alert_notification_settings:
+ ~azure.mgmt.machinelearningservices.models.MonitorNotificationSettings
+ :ivar compute_configuration: [Required] The compute configuration used to run the monitoring
+ job. Required.
+ :vartype compute_configuration:
+ ~azure.mgmt.machinelearningservices.models.MonitorComputeConfigurationBase
+ :ivar monitoring_target: The ARM resource ID of either the model or deployment targeted by this
+ monitor.
+ :vartype monitoring_target: ~azure.mgmt.machinelearningservices.models.MonitoringTarget
+ :ivar signals: [Required] The signals to monitor. Required.
+ :vartype signals: dict[str, ~azure.mgmt.machinelearningservices.models.MonitoringSignalBase]
"""
+ _validation = {
+ "compute_configuration": {"required": True},
+ "signals": {"required": True},
+ }
+
_attribute_map = {
- "liveness_route": {"key": "livenessRoute", "type": "Route"},
- "readiness_route": {"key": "readinessRoute", "type": "Route"},
- "scoring_route": {"key": "scoringRoute", "type": "Route"},
+ "alert_notification_settings": {"key": "alertNotificationSettings", "type": "MonitorNotificationSettings"},
+ "compute_configuration": {"key": "computeConfiguration", "type": "MonitorComputeConfigurationBase"},
+ "monitoring_target": {"key": "monitoringTarget", "type": "MonitoringTarget"},
+ "signals": {"key": "signals", "type": "{MonitoringSignalBase}"},
}
def __init__(
self,
*,
- liveness_route: Optional["_models.Route"] = None,
- readiness_route: Optional["_models.Route"] = None,
- scoring_route: Optional["_models.Route"] = None,
+ compute_configuration: "_models.MonitorComputeConfigurationBase",
+ signals: Dict[str, "_models.MonitoringSignalBase"],
+ alert_notification_settings: Optional["_models.MonitorNotificationSettings"] = None,
+ monitoring_target: Optional["_models.MonitoringTarget"] = None,
**kwargs: Any
) -> None:
"""
- :keyword liveness_route: The route to check the liveness of the inference server container.
- :paramtype liveness_route: ~azure.mgmt.machinelearningservices.models.Route
- :keyword readiness_route: The route to check the readiness of the inference server container.
- :paramtype readiness_route: ~azure.mgmt.machinelearningservices.models.Route
- :keyword scoring_route: The port to send the scoring requests to, within the inference server
- container.
- :paramtype scoring_route: ~azure.mgmt.machinelearningservices.models.Route
+ :keyword alert_notification_settings: The monitor's notification settings.
+ :paramtype alert_notification_settings:
+ ~azure.mgmt.machinelearningservices.models.MonitorNotificationSettings
+ :keyword compute_configuration: [Required] The ARM resource ID of the compute resource to run
+ the monitoring job on. Required.
+ :paramtype compute_configuration:
+ ~azure.mgmt.machinelearningservices.models.MonitorComputeConfigurationBase
+ :keyword monitoring_target: The ARM resource ID of either the model or deployment targeted by
+ this monitor.
+ :paramtype monitoring_target: ~azure.mgmt.machinelearningservices.models.MonitoringTarget
+ :keyword signals: [Required] The signals to monitor. Required.
+ :paramtype signals: dict[str, ~azure.mgmt.machinelearningservices.models.MonitoringSignalBase]
"""
super().__init__(**kwargs)
- self.liveness_route = liveness_route
- self.readiness_route = readiness_route
- self.scoring_route = scoring_route
+ self.alert_notification_settings = alert_notification_settings
+ self.compute_configuration = compute_configuration
+ self.monitoring_target = monitoring_target
+ self.signals = signals
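
As a minimal usage sketch (not part of the generated code): `spark_compute` would be any MonitorComputeConfigurationBase subclass, such as the MonitorServerlessSparkCompute added further down in this hunk, and `drift_signal` is a placeholder for a concrete MonitoringSignalBase subclass not shown here.

    from azure.mgmt.machinelearningservices import models

    # Sketch only: spark_compute and drift_signal are placeholders for objects
    # built from the compute and signal subclasses elsewhere in this change.
    definition = models.MonitorDefinition(
        compute_configuration=spark_compute,
        signals={"data_drift": drift_signal},  # keyed by a caller-chosen signal name
        monitoring_target=models.MonitoringTarget(task_type="Classification"),
    )
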
-class InstanceTypeSchema(_serialization.Model):
- """Instance type schema.
+class MonitorEmailNotificationSettings(_serialization.Model):
+ """MonitorEmailNotificationSettings.
- :ivar node_selector: Node Selector.
- :vartype node_selector: dict[str, str]
- :ivar resources: Resource requests/limits for this instance type.
- :vartype resources: ~azure.mgmt.machinelearningservices.models.InstanceTypeSchemaResources
+ :ivar emails: The email recipient list, which is limited to 499 characters in total.
+ :vartype emails: list[str]
"""
_attribute_map = {
- "node_selector": {"key": "nodeSelector", "type": "{str}"},
- "resources": {"key": "resources", "type": "InstanceTypeSchemaResources"},
+ "emails": {"key": "emails", "type": "[str]"},
}
- def __init__(
- self,
- *,
- node_selector: Optional[Dict[str, str]] = None,
- resources: Optional["_models.InstanceTypeSchemaResources"] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, emails: Optional[List[str]] = None, **kwargs: Any) -> None:
"""
- :keyword node_selector: Node Selector.
- :paramtype node_selector: dict[str, str]
- :keyword resources: Resource requests/limits for this instance type.
- :paramtype resources: ~azure.mgmt.machinelearningservices.models.InstanceTypeSchemaResources
+ :keyword emails: The email recipient list, which is limited to 499 characters in total.
+ :paramtype emails: list[str]
"""
super().__init__(**kwargs)
- self.node_selector = node_selector
- self.resources = resources
+ self.emails = emails
-class InstanceTypeSchemaResources(_serialization.Model):
- """Resource requests/limits for this instance type.
+class MonitoringDataSegment(_serialization.Model):
+ """MonitoringDataSegment.
- :ivar requests: Resource requests for this instance type.
- :vartype requests: dict[str, str]
- :ivar limits: Resource limits for this instance type.
- :vartype limits: dict[str, str]
+ :ivar feature: The feature to segment the data on.
+ :vartype feature: str
+ :ivar values: Filters for only the specified values of the given segmented feature.
+ :vartype values: list[str]
"""
_attribute_map = {
- "requests": {"key": "requests", "type": "{str}"},
- "limits": {"key": "limits", "type": "{str}"},
+ "feature": {"key": "feature", "type": "str"},
+ "values": {"key": "values", "type": "[str]"},
}
- def __init__(
- self, *, requests: Optional[Dict[str, str]] = None, limits: Optional[Dict[str, str]] = None, **kwargs: Any
- ) -> None:
+ def __init__(self, *, feature: Optional[str] = None, values: Optional[List[str]] = None, **kwargs: Any) -> None:
"""
- :keyword requests: Resource requests for this instance type.
- :paramtype requests: dict[str, str]
- :keyword limits: Resource limits for this instance type.
- :paramtype limits: dict[str, str]
+ :keyword feature: The feature to segment the data on.
+ :paramtype feature: str
+ :keyword values: Filters for only the specified values of the given segmented feature.
+ :paramtype values: list[str]
"""
super().__init__(**kwargs)
- self.requests = requests
- self.limits = limits
-
+ self.feature = feature
+ self.values = values
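
A short illustrative sketch of segmenting monitored data on one feature (the feature name and values are assumptions):

    from azure.mgmt.machinelearningservices import models

    # Restrict a signal to two values of an assumed "region" feature.
    segment = models.MonitoringDataSegment(
        feature="region",
        values=["eastus", "westeurope"],
    )
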
-class JobBase(Resource):
- """Azure Resource Manager resource envelope.
- Variables are only populated by the server, and will be ignored when sending a request.
+class MonitoringTarget(_serialization.Model):
+ """Monitoring target definition.
All required parameters must be populated in order to send to Azure.
- :ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
- :vartype id: str
- :ivar name: The name of the resource.
- :vartype name: str
- :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
- "Microsoft.Storage/storageAccounts".
- :vartype type: str
- :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
- information.
- :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
- :ivar properties: [Required] Additional attributes of the entity. Required.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.JobBaseProperties
+ :ivar deployment_id: The ARM resource ID of the deployment targeted by this monitor.
+ :vartype deployment_id: str
+ :ivar model_id: The ARM resource ID of the model targeted by this monitor.
+ :vartype model_id: str
+ :ivar task_type: [Required] The machine learning task type of the model. Required. Known values
+ are: "Classification", "Regression", and "QuestionAnswering".
+ :vartype task_type: str or ~azure.mgmt.machinelearningservices.models.ModelTaskType
"""
_validation = {
- "id": {"readonly": True},
- "name": {"readonly": True},
- "type": {"readonly": True},
- "system_data": {"readonly": True},
- "properties": {"required": True},
+ "task_type": {"required": True},
}
_attribute_map = {
- "id": {"key": "id", "type": "str"},
- "name": {"key": "name", "type": "str"},
- "type": {"key": "type", "type": "str"},
- "system_data": {"key": "systemData", "type": "SystemData"},
- "properties": {"key": "properties", "type": "JobBaseProperties"},
+ "deployment_id": {"key": "deploymentId", "type": "str"},
+ "model_id": {"key": "modelId", "type": "str"},
+ "task_type": {"key": "taskType", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ task_type: Union[str, "_models.ModelTaskType"],
+ deployment_id: Optional[str] = None,
+ model_id: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword deployment_id: The ARM resource ID of the deployment targeted by this monitor.
+ :paramtype deployment_id: str
+ :keyword model_id: The ARM resource ID of the model targeted by this monitor.
+ :paramtype model_id: str
+ :keyword task_type: [Required] The machine learning task type of the model. Required. Known
+ values are: "Classification", "Regression", and "QuestionAnswering".
+ :paramtype task_type: str or ~azure.mgmt.machinelearningservices.models.ModelTaskType
+ """
+ super().__init__(**kwargs)
+ self.deployment_id = deployment_id
+ self.model_id = model_id
+ self.task_type = task_type
+
+
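
An illustrative sketch of a deployment-scoped target; the ARM IDs below are placeholders, and "Classification" is one of the known task types listed above.

    from azure.mgmt.machinelearningservices import models

    # Placeholder ARM IDs; task_type is the only required field.
    target = models.MonitoringTarget(
        task_type="Classification",
        deployment_id="/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.MachineLearningServices/workspaces/<ws>/onlineEndpoints/<endpoint>/deployments/<deployment>",
        model_id="/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.MachineLearningServices/workspaces/<ws>/models/<model>/versions/1",
    )
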
+class MonitoringThreshold(_serialization.Model):
+ """MonitoringThreshold.
+
+ :ivar value: The threshold value. If null, the default depends on the metric type.
+ :vartype value: float
+ """
+
+ _attribute_map = {
+ "value": {"key": "value", "type": "float"},
+ }
+
+ def __init__(self, *, value: Optional[float] = None, **kwargs: Any) -> None:
+ """
+ :keyword value: The threshold value. If null, the default depends on the metric type.
+ :paramtype value: float
+ """
+ super().__init__(**kwargs)
+ self.value = value
+
+
+class MonitoringWorkspaceConnection(_serialization.Model):
+ """Monitoring workspace connection definition.
+
+ :ivar environment_variables: The properties of a workspace service connection to store as
+ environment variables in the submitted jobs.
+ The key is the workspace connection property path; the value is the environment variable key.
+ :vartype environment_variables: dict[str, str]
+ :ivar secrets: The properties of a workspace service connection to store as secrets in the
+ submitted jobs.
+ The key is the workspace connection property path; the value is the secret key.
+ :vartype secrets: dict[str, str]
+ """
+
+ _attribute_map = {
+ "environment_variables": {"key": "environmentVariables", "type": "{str}"},
+ "secrets": {"key": "secrets", "type": "{str}"},
}
- def __init__(self, *, properties: "_models.JobBaseProperties", **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ environment_variables: Optional[Dict[str, str]] = None,
+ secrets: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword properties: [Required] Additional attributes of the entity. Required.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.JobBaseProperties
+ :keyword environment_variables: The properties of a workspace service connection to store as
+ environment variables in the submitted jobs.
+ The key is the workspace connection property path; the value is the environment variable key.
+ :paramtype environment_variables: dict[str, str]
+ :keyword secrets: The properties of a workspace service connection to store as secrets in the
+ submitted jobs.
+ The key is the workspace connection property path; the value is the secret key.
+ :paramtype secrets: dict[str, str]
"""
super().__init__(**kwargs)
- self.properties = properties
+ self.environment_variables = environment_variables
+ self.secrets = secrets
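
A hedged sketch: the property-path strings below are assumptions about the expected key format, which this hunk does not define.

    from azure.mgmt.machinelearningservices import models

    # Map workspace-connection properties into the monitoring job; paths are illustrative.
    connection = models.MonitoringWorkspaceConnection(
        environment_variables={"connections/my_conn/target": "MY_CONN_TARGET"},
        secrets={"connections/my_conn/credentials/key": "MY_CONN_KEY"},
    )
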
-class JobBaseResourceArmPaginatedResult(_serialization.Model):
- """A paginated list of JobBase entities.
+class MonitorNotificationSettings(_serialization.Model):
+ """MonitorNotificationSettings.
- :ivar next_link: The link to the next page of JobBase objects. If null, there are no additional
- pages.
- :vartype next_link: str
- :ivar value: An array of objects of type JobBase.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.JobBase]
+ :ivar email_notification_settings: The AML notification email settings.
+ :vartype email_notification_settings:
+ ~azure.mgmt.machinelearningservices.models.MonitorEmailNotificationSettings
"""
_attribute_map = {
- "next_link": {"key": "nextLink", "type": "str"},
- "value": {"key": "value", "type": "[JobBase]"},
+ "email_notification_settings": {"key": "emailNotificationSettings", "type": "MonitorEmailNotificationSettings"},
}
def __init__(
- self, *, next_link: Optional[str] = None, value: Optional[List["_models.JobBase"]] = None, **kwargs: Any
+ self, *, email_notification_settings: Optional["_models.MonitorEmailNotificationSettings"] = None, **kwargs: Any
) -> None:
"""
- :keyword next_link: The link to the next page of JobBase objects. If null, there are no
- additional pages.
- :paramtype next_link: str
- :keyword value: An array of objects of type JobBase.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.JobBase]
+ :keyword email_notification_settings: The AML notification email settings.
+ :paramtype email_notification_settings:
+ ~azure.mgmt.machinelearningservices.models.MonitorEmailNotificationSettings
"""
super().__init__(**kwargs)
- self.next_link = next_link
- self.value = value
+ self.email_notification_settings = email_notification_settings
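
A minimal sketch combining the two notification models above (the address is a placeholder):

    from azure.mgmt.machinelearningservices import models

    # Route monitor alerts to an email list.
    notifications = models.MonitorNotificationSettings(
        email_notification_settings=models.MonitorEmailNotificationSettings(
            emails=["mlops-team@contoso.com"]
        )
    )
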
-class JobResourceConfiguration(ResourceConfiguration):
- """JobResourceConfiguration.
+class MonitorServerlessSparkCompute(MonitorComputeConfigurationBase):
+ """Monitor serverless spark compute definition.
- :ivar instance_count: Optional number of instances or nodes used by the compute target.
- :vartype instance_count: int
- :ivar instance_type: Optional type of VM used as supported by the compute target.
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar compute_type: [Required] Specifies the type of monitoring compute. Required.
+ "ServerlessSpark"
+ :vartype compute_type: str or ~azure.mgmt.machinelearningservices.models.MonitorComputeType
+ :ivar compute_identity: [Required] The identity scheme leveraged by the Spark jobs running
+ on serverless Spark. Required.
+ :vartype compute_identity:
+ ~azure.mgmt.machinelearningservices.models.MonitorComputeIdentityBase
+ :ivar instance_type: [Required] The instance type running the Spark job. Required.
:vartype instance_type: str
- :ivar properties: Additional properties bag.
- :vartype properties: dict[str, JSON]
- :ivar docker_args: Extra arguments to pass to the Docker run command. This would override any
- parameters that have already been set by the system, or in this section. This parameter is only
- supported for Azure ML compute types.
- :vartype docker_args: str
- :ivar shm_size: Size of the docker container's shared memory block. This should be in the
- format of (number)(unit) where number as to be greater than 0 and the unit can be one of
- b(bytes), k(kilobytes), m(megabytes), or g(gigabytes).
- :vartype shm_size: str
+ :ivar runtime_version: [Required] The Spark runtime version. Required.
+ :vartype runtime_version: str
"""
_validation = {
- "shm_size": {"pattern": r"\d+[bBkKmMgG]"},
+ "compute_type": {"required": True},
+ "compute_identity": {"required": True},
+ "instance_type": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "runtime_version": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
}
_attribute_map = {
- "instance_count": {"key": "instanceCount", "type": "int"},
+ "compute_type": {"key": "computeType", "type": "str"},
+ "compute_identity": {"key": "computeIdentity", "type": "MonitorComputeIdentityBase"},
"instance_type": {"key": "instanceType", "type": "str"},
- "properties": {"key": "properties", "type": "{object}"},
- "docker_args": {"key": "dockerArgs", "type": "str"},
- "shm_size": {"key": "shmSize", "type": "str"},
+ "runtime_version": {"key": "runtimeVersion", "type": "str"},
}
def __init__(
self,
*,
- instance_count: int = 1,
- instance_type: Optional[str] = None,
- properties: Optional[Dict[str, JSON]] = None,
- docker_args: Optional[str] = None,
- shm_size: str = "2g",
+ compute_identity: "_models.MonitorComputeIdentityBase",
+ instance_type: str,
+ runtime_version: str,
**kwargs: Any
) -> None:
"""
- :keyword instance_count: Optional number of instances or nodes used by the compute target.
- :paramtype instance_count: int
- :keyword instance_type: Optional type of VM used as supported by the compute target.
+ :keyword compute_identity: [Required] The identity scheme leveraged by the Spark jobs
+ running on serverless Spark. Required.
+ :paramtype compute_identity:
+ ~azure.mgmt.machinelearningservices.models.MonitorComputeIdentityBase
+ :keyword instance_type: [Required] The instance type running the Spark job. Required.
:paramtype instance_type: str
- :keyword properties: Additional properties bag.
- :paramtype properties: dict[str, JSON]
- :keyword docker_args: Extra arguments to pass to the Docker run command. This would override
- any parameters that have already been set by the system, or in this section. This parameter is
- only supported for Azure ML compute types.
- :paramtype docker_args: str
- :keyword shm_size: Size of the docker container's shared memory block. This should be in the
- format of (number)(unit) where number as to be greater than 0 and the unit can be one of
- b(bytes), k(kilobytes), m(megabytes), or g(gigabytes).
- :paramtype shm_size: str
+ :keyword runtime_version: [Required] The Spark runtime version. Required.
+ :paramtype runtime_version: str
"""
- super().__init__(instance_count=instance_count, instance_type=instance_type, properties=properties, **kwargs)
- self.docker_args = docker_args
- self.shm_size = shm_size
+ super().__init__(**kwargs)
+ self.compute_type: str = "ServerlessSpark"
+ self.compute_identity = compute_identity
+ self.instance_type = instance_type
+ self.runtime_version = runtime_version
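
A sketch under stated assumptions: AmlTokenComputeIdentity is assumed to be one of the MonitorComputeIdentityBase subclasses (it is not part of this hunk), and the instance type and runtime version are illustrative values.

    from azure.mgmt.machinelearningservices import models

    # Serverless Spark compute for monitoring jobs; identity subclass and values assumed.
    spark_compute = models.MonitorServerlessSparkCompute(
        compute_identity=models.AmlTokenComputeIdentity(),
        instance_type="standard_e4s_v3",
        runtime_version="3.3",
    )
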
-class JobScheduleAction(ScheduleActionBase):
- """JobScheduleAction.
+class Mpi(DistributionConfiguration):
+ """MPI distribution configuration.
All required parameters must be populated in order to send to Azure.
- :ivar action_type: [Required] Specifies the action type of the schedule. Required. Known values
- are: "CreateJob" and "InvokeBatchEndpoint".
- :vartype action_type: str or ~azure.mgmt.machinelearningservices.models.ScheduleActionType
- :ivar job_definition: [Required] Defines Schedule action definition details. Required.
- :vartype job_definition: ~azure.mgmt.machinelearningservices.models.JobBaseProperties
+ :ivar distribution_type: [Required] Specifies the type of distribution framework. Required.
+ Known values are: "PyTorch", "TensorFlow", "Mpi", and "Ray".
+ :vartype distribution_type: str or ~azure.mgmt.machinelearningservices.models.DistributionType
+ :ivar process_count_per_instance: Number of processes per MPI node.
+ :vartype process_count_per_instance: int
"""
_validation = {
- "action_type": {"required": True},
- "job_definition": {"required": True},
+ "distribution_type": {"required": True},
}
_attribute_map = {
- "action_type": {"key": "actionType", "type": "str"},
- "job_definition": {"key": "jobDefinition", "type": "JobBaseProperties"},
+ "distribution_type": {"key": "distributionType", "type": "str"},
+ "process_count_per_instance": {"key": "processCountPerInstance", "type": "int"},
}
- def __init__(self, *, job_definition: "_models.JobBaseProperties", **kwargs: Any) -> None:
+ def __init__(self, *, process_count_per_instance: Optional[int] = None, **kwargs: Any) -> None:
"""
- :keyword job_definition: [Required] Defines Schedule action definition details. Required.
- :paramtype job_definition: ~azure.mgmt.machinelearningservices.models.JobBaseProperties
+ :keyword process_count_per_instance: Number of processes per MPI node.
+ :paramtype process_count_per_instance: int
"""
super().__init__(**kwargs)
- self.action_type: str = "CreateJob"
- self.job_definition = job_definition
-
+ self.distribution_type: str = "Mpi"
+ self.process_count_per_instance = process_count_per_instance
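
For reference, a one-line sketch of the MPI distribution configuration:

    from azure.mgmt.machinelearningservices import models

    # Two worker processes per MPI node.
    distribution = models.Mpi(process_count_per_instance=2)
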
-class JobService(_serialization.Model):
- """Job endpoint definition.
- Variables are only populated by the server, and will be ignored when sending a request.
+class NlpFixedParameters(_serialization.Model):
+ """Fixed training parameters that won't be swept over during AutoML NLP training.
- :ivar endpoint: Url for endpoint.
- :vartype endpoint: str
- :ivar error_message: Any error in the service.
- :vartype error_message: str
- :ivar job_service_type: Endpoint type.
- :vartype job_service_type: str
- :ivar nodes: Nodes that user would like to start the service on.
- If Nodes is not set or set to null, the service will only be started on leader node.
- :vartype nodes: ~azure.mgmt.machinelearningservices.models.Nodes
- :ivar port: Port for endpoint.
- :vartype port: int
- :ivar properties: Additional properties to set on the endpoint.
- :vartype properties: dict[str, str]
- :ivar status: Status of endpoint.
- :vartype status: str
+ :ivar gradient_accumulation_steps: Number of steps to accumulate gradients over before running
+ a backward pass.
+ :vartype gradient_accumulation_steps: int
+ :ivar learning_rate: The learning rate for the training procedure.
+ :vartype learning_rate: float
+ :ivar learning_rate_scheduler: The type of learning rate schedule to use during the training
+ procedure. Known values are: "None", "Linear", "Cosine", "CosineWithRestarts", "Polynomial",
+ "Constant", and "ConstantWithWarmup".
+ :vartype learning_rate_scheduler: str or
+ ~azure.mgmt.machinelearningservices.models.NlpLearningRateScheduler
+ :ivar model_name: The name of the model to train.
+ :vartype model_name: str
+ :ivar number_of_epochs: Number of training epochs.
+ :vartype number_of_epochs: int
+ :ivar training_batch_size: The batch size for the training procedure.
+ :vartype training_batch_size: int
+ :ivar validation_batch_size: The batch size to be used during evaluation.
+ :vartype validation_batch_size: int
+ :ivar warmup_ratio: The warmup ratio, used alongside LrSchedulerType.
+ :vartype warmup_ratio: float
+ :ivar weight_decay: The weight decay for the training procedure.
+ :vartype weight_decay: float
"""
- _validation = {
- "error_message": {"readonly": True},
- "status": {"readonly": True},
- }
-
_attribute_map = {
- "endpoint": {"key": "endpoint", "type": "str"},
- "error_message": {"key": "errorMessage", "type": "str"},
- "job_service_type": {"key": "jobServiceType", "type": "str"},
- "nodes": {"key": "nodes", "type": "Nodes"},
- "port": {"key": "port", "type": "int"},
- "properties": {"key": "properties", "type": "{str}"},
- "status": {"key": "status", "type": "str"},
+ "gradient_accumulation_steps": {"key": "gradientAccumulationSteps", "type": "int"},
+ "learning_rate": {"key": "learningRate", "type": "float"},
+ "learning_rate_scheduler": {"key": "learningRateScheduler", "type": "str"},
+ "model_name": {"key": "modelName", "type": "str"},
+ "number_of_epochs": {"key": "numberOfEpochs", "type": "int"},
+ "training_batch_size": {"key": "trainingBatchSize", "type": "int"},
+ "validation_batch_size": {"key": "validationBatchSize", "type": "int"},
+ "warmup_ratio": {"key": "warmupRatio", "type": "float"},
+ "weight_decay": {"key": "weightDecay", "type": "float"},
}
def __init__(
self,
*,
- endpoint: Optional[str] = None,
- job_service_type: Optional[str] = None,
- nodes: Optional["_models.Nodes"] = None,
- port: Optional[int] = None,
- properties: Optional[Dict[str, str]] = None,
+ gradient_accumulation_steps: Optional[int] = None,
+ learning_rate: Optional[float] = None,
+ learning_rate_scheduler: Optional[Union[str, "_models.NlpLearningRateScheduler"]] = None,
+ model_name: Optional[str] = None,
+ number_of_epochs: Optional[int] = None,
+ training_batch_size: Optional[int] = None,
+ validation_batch_size: Optional[int] = None,
+ warmup_ratio: Optional[float] = None,
+ weight_decay: Optional[float] = None,
**kwargs: Any
) -> None:
"""
- :keyword endpoint: Url for endpoint.
- :paramtype endpoint: str
- :keyword job_service_type: Endpoint type.
- :paramtype job_service_type: str
- :keyword nodes: Nodes that user would like to start the service on.
- If Nodes is not set or set to null, the service will only be started on leader node.
- :paramtype nodes: ~azure.mgmt.machinelearningservices.models.Nodes
- :keyword port: Port for endpoint.
- :paramtype port: int
- :keyword properties: Additional properties to set on the endpoint.
- :paramtype properties: dict[str, str]
- """
- super().__init__(**kwargs)
- self.endpoint = endpoint
- self.error_message = None
- self.job_service_type = job_service_type
- self.nodes = nodes
- self.port = port
- self.properties = properties
- self.status = None
-
-
-class KubernetesSchema(_serialization.Model):
- """Kubernetes Compute Schema.
-
- :ivar properties: Properties of Kubernetes.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.KubernetesProperties
- """
-
- _attribute_map = {
- "properties": {"key": "properties", "type": "KubernetesProperties"},
- }
-
- def __init__(self, *, properties: Optional["_models.KubernetesProperties"] = None, **kwargs: Any) -> None:
- """
- :keyword properties: Properties of Kubernetes.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.KubernetesProperties
+ :keyword gradient_accumulation_steps: Number of steps to accumulate gradients over before
+ running a backward pass.
+ :paramtype gradient_accumulation_steps: int
+ :keyword learning_rate: The learning rate for the training procedure.
+ :paramtype learning_rate: float
+ :keyword learning_rate_scheduler: The type of learning rate schedule to use during the training
+ procedure. Known values are: "None", "Linear", "Cosine", "CosineWithRestarts", "Polynomial",
+ "Constant", and "ConstantWithWarmup".
+ :paramtype learning_rate_scheduler: str or
+ ~azure.mgmt.machinelearningservices.models.NlpLearningRateScheduler
+ :keyword model_name: The name of the model to train.
+ :paramtype model_name: str
+ :keyword number_of_epochs: Number of training epochs.
+ :paramtype number_of_epochs: int
+ :keyword training_batch_size: The batch size for the training procedure.
+ :paramtype training_batch_size: int
+ :keyword validation_batch_size: The batch size to be used during evaluation.
+ :paramtype validation_batch_size: int
+ :keyword warmup_ratio: The warmup ratio, used alongside LrSchedulerType.
+ :paramtype warmup_ratio: float
+ :keyword weight_decay: The weight decay for the training procedure.
+ :paramtype weight_decay: float
"""
super().__init__(**kwargs)
- self.properties = properties
-
-
-class Kubernetes(Compute, KubernetesSchema): # pylint: disable=too-many-instance-attributes
- """A Machine Learning compute based on Kubernetes Compute.
+ self.gradient_accumulation_steps = gradient_accumulation_steps
+ self.learning_rate = learning_rate
+ self.learning_rate_scheduler = learning_rate_scheduler
+ self.model_name = model_name
+ self.number_of_epochs = number_of_epochs
+ self.training_batch_size = training_batch_size
+ self.validation_batch_size = validation_batch_size
+ self.warmup_ratio = warmup_ratio
+ self.weight_decay = weight_decay
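
An illustrative sketch; the model name and hyperparameter values are assumptions, and "Linear" is one of the known scheduler values listed above.

    from azure.mgmt.machinelearningservices import models

    # Hyperparameters held fixed (not swept) during AutoML NLP training; values illustrative.
    fixed = models.NlpFixedParameters(
        model_name="bert-base-cased",
        learning_rate=2e-5,
        learning_rate_scheduler="Linear",
        number_of_epochs=3,
        training_batch_size=32,
        warmup_ratio=0.1,
        weight_decay=0.01,
    )
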
- Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+class NlpParameterSubspace(_serialization.Model):
+ """Stringified search spaces for each parameter. See below examples.
- :ivar properties: Properties of Kubernetes.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.KubernetesProperties
- :ivar compute_type: The type of compute. Required. Known values are: "AKS", "Kubernetes",
- "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight", "Databricks",
- "DataLakeAnalytics", and "SynapseSpark".
- :vartype compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
- :ivar compute_location: Location for the underlying compute.
- :vartype compute_location: str
- :ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
- Updating, Provisioning, Succeeded, and Failed. Known values are: "Unknown", "Updating",
- "Creating", "Deleting", "Succeeded", "Failed", and "Canceled".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.ProvisioningState
- :ivar description: The description of the Machine Learning compute.
- :vartype description: str
- :ivar created_on: The time at which the compute was created.
- :vartype created_on: ~datetime.datetime
- :ivar modified_on: The time at which the compute was last modified.
- :vartype modified_on: ~datetime.datetime
- :ivar resource_id: ARM resource id of the underlying compute.
- :vartype resource_id: str
- :ivar provisioning_errors: Errors during provisioning.
- :vartype provisioning_errors: list[~azure.mgmt.machinelearningservices.models.ErrorResponse]
- :ivar is_attached_compute: Indicating whether the compute was provisioned by user and brought
- from outside if true, or machine learning service provisioned it if false.
- :vartype is_attached_compute: bool
- :ivar disable_local_auth: Opt-out of local authentication and ensure customers can use only MSI
- and AAD exclusively for authentication.
- :vartype disable_local_auth: bool
+ :ivar gradient_accumulation_steps: Number of steps to accumulate gradients over before running
+ a backward pass.
+ :vartype gradient_accumulation_steps: str
+ :ivar learning_rate: The learning rate for the training procedure.
+ :vartype learning_rate: str
+ :ivar learning_rate_scheduler: The type of learning rate schedule to use during the training
+ procedure.
+ :vartype learning_rate_scheduler: str
+ :ivar model_name: The name of the model to train.
+ :vartype model_name: str
+ :ivar number_of_epochs: Number of training epochs.
+ :vartype number_of_epochs: str
+ :ivar training_batch_size: The batch size for the training procedure.
+ :vartype training_batch_size: str
+ :ivar validation_batch_size: The batch size to be used during evaluation.
+ :vartype validation_batch_size: str
+ :ivar warmup_ratio: The warmup ratio, used alongside LrSchedulerType.
+ :vartype warmup_ratio: str
+ :ivar weight_decay: The weight decay for the training procedure.
+ :vartype weight_decay: str
"""
- _validation = {
- "compute_type": {"required": True},
- "provisioning_state": {"readonly": True},
- "created_on": {"readonly": True},
- "modified_on": {"readonly": True},
- "provisioning_errors": {"readonly": True},
- "is_attached_compute": {"readonly": True},
- }
-
_attribute_map = {
- "properties": {"key": "properties", "type": "KubernetesProperties"},
- "compute_type": {"key": "computeType", "type": "str"},
- "compute_location": {"key": "computeLocation", "type": "str"},
- "provisioning_state": {"key": "provisioningState", "type": "str"},
- "description": {"key": "description", "type": "str"},
- "created_on": {"key": "createdOn", "type": "iso-8601"},
- "modified_on": {"key": "modifiedOn", "type": "iso-8601"},
- "resource_id": {"key": "resourceId", "type": "str"},
- "provisioning_errors": {"key": "provisioningErrors", "type": "[ErrorResponse]"},
- "is_attached_compute": {"key": "isAttachedCompute", "type": "bool"},
- "disable_local_auth": {"key": "disableLocalAuth", "type": "bool"},
+ "gradient_accumulation_steps": {"key": "gradientAccumulationSteps", "type": "str"},
+ "learning_rate": {"key": "learningRate", "type": "str"},
+ "learning_rate_scheduler": {"key": "learningRateScheduler", "type": "str"},
+ "model_name": {"key": "modelName", "type": "str"},
+ "number_of_epochs": {"key": "numberOfEpochs", "type": "str"},
+ "training_batch_size": {"key": "trainingBatchSize", "type": "str"},
+ "validation_batch_size": {"key": "validationBatchSize", "type": "str"},
+ "warmup_ratio": {"key": "warmupRatio", "type": "str"},
+ "weight_decay": {"key": "weightDecay", "type": "str"},
}
def __init__(
self,
*,
- properties: Optional["_models.KubernetesProperties"] = None,
- compute_location: Optional[str] = None,
- description: Optional[str] = None,
- resource_id: Optional[str] = None,
- disable_local_auth: Optional[bool] = None,
+ gradient_accumulation_steps: Optional[str] = None,
+ learning_rate: Optional[str] = None,
+ learning_rate_scheduler: Optional[str] = None,
+ model_name: Optional[str] = None,
+ number_of_epochs: Optional[str] = None,
+ training_batch_size: Optional[str] = None,
+ validation_batch_size: Optional[str] = None,
+ warmup_ratio: Optional[str] = None,
+ weight_decay: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword properties: Properties of Kubernetes.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.KubernetesProperties
- :keyword compute_location: Location for the underlying compute.
- :paramtype compute_location: str
- :keyword description: The description of the Machine Learning compute.
- :paramtype description: str
- :keyword resource_id: ARM resource id of the underlying compute.
- :paramtype resource_id: str
- :keyword disable_local_auth: Opt-out of local authentication and ensure customers can use only
- MSI and AAD exclusively for authentication.
- :paramtype disable_local_auth: bool
+ :keyword gradient_accumulation_steps: Number of steps to accumulate gradients over before
+ running a backward pass.
+ :paramtype gradient_accumulation_steps: str
+ :keyword learning_rate: The learning rate for the training procedure.
+ :paramtype learning_rate: str
+ :keyword learning_rate_scheduler: The type of learning rate schedule to use during the training
+ procedure.
+ :paramtype learning_rate_scheduler: str
+ :keyword model_name: The name of the model to train.
+ :paramtype model_name: str
+ :keyword number_of_epochs: Number of training epochs.
+ :paramtype number_of_epochs: str
+ :keyword training_batch_size: The batch size for the training procedure.
+ :paramtype training_batch_size: str
+ :keyword validation_batch_size: The batch size to be used during evaluation.
+ :paramtype validation_batch_size: str
+ :keyword warmup_ratio: The warmup ratio, used alongside LrSchedulerType.
+ :paramtype warmup_ratio: str
+ :keyword weight_decay: The weight decay for the training procedure.
+ :paramtype weight_decay: str
"""
- super().__init__(
- compute_location=compute_location,
- description=description,
- resource_id=resource_id,
- disable_local_auth=disable_local_auth,
- properties=properties,
- **kwargs
- )
- self.properties = properties
- self.compute_type: str = "Kubernetes"
- self.compute_location = compute_location
- self.provisioning_state = None
- self.description = description
- self.created_on = None
- self.modified_on = None
- self.resource_id = resource_id
- self.provisioning_errors = None
- self.is_attached_compute = None
- self.disable_local_auth = disable_local_auth
-
-
-class OnlineDeploymentProperties(EndpointDeploymentPropertiesBase): # pylint: disable=too-many-instance-attributes
- """OnlineDeploymentProperties.
+ super().__init__(**kwargs)
+ self.gradient_accumulation_steps = gradient_accumulation_steps
+ self.learning_rate = learning_rate
+ self.learning_rate_scheduler = learning_rate_scheduler
+ self.model_name = model_name
+ self.number_of_epochs = number_of_epochs
+ self.training_batch_size = training_batch_size
+ self.validation_batch_size = validation_batch_size
+ self.warmup_ratio = warmup_ratio
+ self.weight_decay = weight_decay
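
A hedged sketch of one stringified subspace; the choice()/uniform() expression syntax is assumed from AutoML sweep conventions and is not defined in this hunk.

    from azure.mgmt.machinelearningservices import models

    # Every field is a stringified distribution or choice expression (syntax assumed).
    subspace = models.NlpParameterSubspace(
        model_name="choice('bert-base-cased','roberta-base')",
        learning_rate="uniform(1e-5,5e-5)",
        number_of_epochs="choice(3,4)",
    )
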
- You probably want to use the sub-classes and not this class directly. Known sub-classes are:
- KubernetesOnlineDeployment, ManagedOnlineDeployment
- Variables are only populated by the server, and will be ignored when sending a request.
+class NlpSweepSettings(_serialization.Model):
+ """Model sweeping and hyperparameter tuning related settings.
All required parameters must be populated in order to send to Azure.
- :ivar code_configuration: Code configuration for the endpoint deployment.
- :vartype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
- :ivar description: Description of the endpoint deployment.
- :vartype description: str
- :ivar environment_id: ARM resource ID or AssetId of the environment specification for the
- endpoint deployment.
- :vartype environment_id: str
- :ivar environment_variables: Environment variables configuration for the deployment.
- :vartype environment_variables: dict[str, str]
- :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
- :vartype properties: dict[str, str]
- :ivar app_insights_enabled: If true, enables Application Insights logging.
- :vartype app_insights_enabled: bool
- :ivar egress_public_network_access: If Enabled, allow egress public network access. If
- Disabled, this will create secure egress. Default: Enabled. Known values are: "Enabled" and
- "Disabled".
- :vartype egress_public_network_access: str or
- ~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
- :ivar endpoint_compute_type: [Required] The compute type of the endpoint. Required. Known
- values are: "Managed", "Kubernetes", and "AzureMLCompute".
- :vartype endpoint_compute_type: str or
- ~azure.mgmt.machinelearningservices.models.EndpointComputeType
- :ivar instance_type: Compute instance type.
- :vartype instance_type: str
- :ivar liveness_probe: Liveness probe monitors the health of the container regularly.
- :vartype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
- :ivar model: The URI path to the model.
- :vartype model: str
- :ivar model_mount_path: The path to mount the model in custom container.
- :vartype model_mount_path: str
- :ivar provisioning_state: Provisioning state for the endpoint deployment. Known values are:
- "Creating", "Deleting", "Scaling", "Updating", "Succeeded", "Failed", and "Canceled".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.DeploymentProvisioningState
- :ivar readiness_probe: Readiness probe validates if the container is ready to serve traffic.
- The properties and defaults are the same as liveness probe.
- :vartype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
- :ivar request_settings: Request settings for the deployment.
- :vartype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
- :ivar scale_settings: Scale settings for the deployment.
- If it is null or not provided,
- it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
- and to DefaultScaleSettings for ManagedOnlineDeployment.
- :vartype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
+ :ivar early_termination: Type of early termination policy for the sweeping job.
+ :vartype early_termination: ~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicy
+ :ivar sampling_algorithm: [Required] Type of sampling algorithm. Required. Known values are:
+ "Grid", "Random", and "Bayesian".
+ :vartype sampling_algorithm: str or
+ ~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
"""
_validation = {
- "endpoint_compute_type": {"required": True},
- "provisioning_state": {"readonly": True},
+ "sampling_algorithm": {"required": True},
}
_attribute_map = {
- "code_configuration": {"key": "codeConfiguration", "type": "CodeConfiguration"},
- "description": {"key": "description", "type": "str"},
- "environment_id": {"key": "environmentId", "type": "str"},
- "environment_variables": {"key": "environmentVariables", "type": "{str}"},
- "properties": {"key": "properties", "type": "{str}"},
- "app_insights_enabled": {"key": "appInsightsEnabled", "type": "bool"},
- "egress_public_network_access": {"key": "egressPublicNetworkAccess", "type": "str"},
- "endpoint_compute_type": {"key": "endpointComputeType", "type": "str"},
- "instance_type": {"key": "instanceType", "type": "str"},
- "liveness_probe": {"key": "livenessProbe", "type": "ProbeSettings"},
- "model": {"key": "model", "type": "str"},
- "model_mount_path": {"key": "modelMountPath", "type": "str"},
- "provisioning_state": {"key": "provisioningState", "type": "str"},
- "readiness_probe": {"key": "readinessProbe", "type": "ProbeSettings"},
- "request_settings": {"key": "requestSettings", "type": "OnlineRequestSettings"},
- "scale_settings": {"key": "scaleSettings", "type": "OnlineScaleSettings"},
- }
-
- _subtype_map = {
- "endpoint_compute_type": {"Kubernetes": "KubernetesOnlineDeployment", "Managed": "ManagedOnlineDeployment"}
+ "early_termination": {"key": "earlyTermination", "type": "EarlyTerminationPolicy"},
+ "sampling_algorithm": {"key": "samplingAlgorithm", "type": "str"},
}
def __init__(
self,
*,
- code_configuration: Optional["_models.CodeConfiguration"] = None,
- description: Optional[str] = None,
- environment_id: Optional[str] = None,
- environment_variables: Optional[Dict[str, str]] = None,
- properties: Optional[Dict[str, str]] = None,
- app_insights_enabled: bool = False,
- egress_public_network_access: Optional[Union[str, "_models.EgressPublicNetworkAccessType"]] = None,
- instance_type: Optional[str] = None,
- liveness_probe: Optional["_models.ProbeSettings"] = None,
- model: Optional[str] = None,
- model_mount_path: Optional[str] = None,
- readiness_probe: Optional["_models.ProbeSettings"] = None,
- request_settings: Optional["_models.OnlineRequestSettings"] = None,
- scale_settings: Optional["_models.OnlineScaleSettings"] = None,
+ sampling_algorithm: Union[str, "_models.SamplingAlgorithmType"],
+ early_termination: Optional["_models.EarlyTerminationPolicy"] = None,
**kwargs: Any
) -> None:
"""
- :keyword code_configuration: Code configuration for the endpoint deployment.
- :paramtype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
- :keyword description: Description of the endpoint deployment.
- :paramtype description: str
- :keyword environment_id: ARM resource ID or AssetId of the environment specification for the
- endpoint deployment.
- :paramtype environment_id: str
- :keyword environment_variables: Environment variables configuration for the deployment.
- :paramtype environment_variables: dict[str, str]
- :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
- :paramtype properties: dict[str, str]
- :keyword app_insights_enabled: If true, enables Application Insights logging.
- :paramtype app_insights_enabled: bool
- :keyword egress_public_network_access: If Enabled, allow egress public network access. If
- Disabled, this will create secure egress. Default: Enabled. Known values are: "Enabled" and
- "Disabled".
- :paramtype egress_public_network_access: str or
- ~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
- :keyword instance_type: Compute instance type.
- :paramtype instance_type: str
- :keyword liveness_probe: Liveness probe monitors the health of the container regularly.
- :paramtype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
- :keyword model: The URI path to the model.
- :paramtype model: str
- :keyword model_mount_path: The path to mount the model in custom container.
- :paramtype model_mount_path: str
- :keyword readiness_probe: Readiness probe validates if the container is ready to serve traffic.
- The properties and defaults are the same as liveness probe.
- :paramtype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
- :keyword request_settings: Request settings for the deployment.
- :paramtype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
- :keyword scale_settings: Scale settings for the deployment.
- If it is null or not provided,
- it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
- and to DefaultScaleSettings for ManagedOnlineDeployment.
- :paramtype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
+ :keyword early_termination: Type of early termination policy for the sweeping job.
+ :paramtype early_termination: ~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicy
+ :keyword sampling_algorithm: [Required] Type of sampling algorithm. Required. Known values are:
+ "Grid", "Random", and "Bayesian".
+ :paramtype sampling_algorithm: str or
+ ~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
"""
- super().__init__(
- code_configuration=code_configuration,
- description=description,
- environment_id=environment_id,
- environment_variables=environment_variables,
- properties=properties,
- **kwargs
- )
- self.app_insights_enabled = app_insights_enabled
- self.egress_public_network_access = egress_public_network_access
- self.endpoint_compute_type: Optional[str] = None
- self.instance_type = instance_type
- self.liveness_probe = liveness_probe
- self.model = model
- self.model_mount_path = model_mount_path
- self.provisioning_state = None
- self.readiness_probe = readiness_probe
- self.request_settings = request_settings
- self.scale_settings = scale_settings
+ super().__init__(**kwargs)
+ self.early_termination = early_termination
+ self.sampling_algorithm = sampling_algorithm
+
+
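
A sketch under assumptions: BanditPolicy is assumed to be one of the EarlyTerminationPolicy subclasses (not shown in this hunk), and its arguments here are illustrative.

    from azure.mgmt.machinelearningservices import models

    # Random sampling over the NLP search space with an assumed bandit early-termination policy.
    sweep = models.NlpSweepSettings(
        sampling_algorithm="Random",
        early_termination=models.BanditPolicy(slack_factor=0.1, evaluation_interval=2),
    )
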
+class NlpVertical(_serialization.Model):
+ """Abstract class for NLP related AutoML tasks.
+ NLP - Natural Language Processing.
+
+ :ivar featurization_settings: Featurization inputs needed for AutoML job.
+ :vartype featurization_settings:
+ ~azure.mgmt.machinelearningservices.models.NlpVerticalFeaturizationSettings
+ :ivar fixed_parameters: Model/training parameters that will remain constant throughout
+ training.
+ :vartype fixed_parameters: ~azure.mgmt.machinelearningservices.models.NlpFixedParameters
+ :ivar limit_settings: Execution constraints for AutoMLJob.
+ :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalLimitSettings
+ :ivar search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :vartype search_space: list[~azure.mgmt.machinelearningservices.models.NlpParameterSubspace]
+ :ivar sweep_settings: Settings for model sweeping and hyperparameter tuning.
+ :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.NlpSweepSettings
+ :ivar validation_data: Validation data inputs.
+ :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ """
+ _attribute_map = {
+ "featurization_settings": {"key": "featurizationSettings", "type": "NlpVerticalFeaturizationSettings"},
+ "fixed_parameters": {"key": "fixedParameters", "type": "NlpFixedParameters"},
+ "limit_settings": {"key": "limitSettings", "type": "NlpVerticalLimitSettings"},
+ "search_space": {"key": "searchSpace", "type": "[NlpParameterSubspace]"},
+ "sweep_settings": {"key": "sweepSettings", "type": "NlpSweepSettings"},
+ "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
+ }
-class KubernetesOnlineDeployment(OnlineDeploymentProperties): # pylint: disable=too-many-instance-attributes
- """Properties specific to a KubernetesOnlineDeployment.
+ def __init__(
+ self,
+ *,
+ featurization_settings: Optional["_models.NlpVerticalFeaturizationSettings"] = None,
+ fixed_parameters: Optional["_models.NlpFixedParameters"] = None,
+ limit_settings: Optional["_models.NlpVerticalLimitSettings"] = None,
+ search_space: Optional[List["_models.NlpParameterSubspace"]] = None,
+ sweep_settings: Optional["_models.NlpSweepSettings"] = None,
+ validation_data: Optional["_models.MLTableJobInput"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword featurization_settings: Featurization inputs needed for AutoML job.
+ :paramtype featurization_settings:
+ ~azure.mgmt.machinelearningservices.models.NlpVerticalFeaturizationSettings
+ :keyword fixed_parameters: Model/training parameters that will remain constant throughout
+ training.
+ :paramtype fixed_parameters: ~azure.mgmt.machinelearningservices.models.NlpFixedParameters
+ :keyword limit_settings: Execution constraints for AutoMLJob.
+ :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalLimitSettings
+ :keyword search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :paramtype search_space: list[~azure.mgmt.machinelearningservices.models.NlpParameterSubspace]
+ :keyword sweep_settings: Settings for model sweeping and hyperparameter tuning.
+ :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.NlpSweepSettings
+ :keyword validation_data: Validation data inputs.
+ :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ """
+ super().__init__(**kwargs)
+ self.featurization_settings = featurization_settings
+ self.fixed_parameters = fixed_parameters
+ self.limit_settings = limit_settings
+ self.search_space = search_space
+ self.sweep_settings = sweep_settings
+ self.validation_data = validation_data
- Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+class NlpVerticalFeaturizationSettings(FeaturizationSettings):
+ """NlpVerticalFeaturizationSettings.
- :ivar code_configuration: Code configuration for the endpoint deployment.
- :vartype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
- :ivar description: Description of the endpoint deployment.
- :vartype description: str
- :ivar environment_id: ARM resource ID or AssetId of the environment specification for the
- endpoint deployment.
- :vartype environment_id: str
- :ivar environment_variables: Environment variables configuration for the deployment.
- :vartype environment_variables: dict[str, str]
- :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
- :vartype properties: dict[str, str]
- :ivar app_insights_enabled: If true, enables Application Insights logging.
- :vartype app_insights_enabled: bool
- :ivar egress_public_network_access: If Enabled, allow egress public network access. If
- Disabled, this will create secure egress. Default: Enabled. Known values are: "Enabled" and
- "Disabled".
- :vartype egress_public_network_access: str or
- ~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
- :ivar endpoint_compute_type: [Required] The compute type of the endpoint. Required. Known
- values are: "Managed", "Kubernetes", and "AzureMLCompute".
- :vartype endpoint_compute_type: str or
- ~azure.mgmt.machinelearningservices.models.EndpointComputeType
- :ivar instance_type: Compute instance type.
- :vartype instance_type: str
- :ivar liveness_probe: Liveness probe monitors the health of the container regularly.
- :vartype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
- :ivar model: The URI path to the model.
- :vartype model: str
- :ivar model_mount_path: The path to mount the model in custom container.
- :vartype model_mount_path: str
- :ivar provisioning_state: Provisioning state for the endpoint deployment. Known values are:
- "Creating", "Deleting", "Scaling", "Updating", "Succeeded", "Failed", and "Canceled".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.DeploymentProvisioningState
- :ivar readiness_probe: Readiness probe validates if the container is ready to serve traffic.
- The properties and defaults are the same as liveness probe.
- :vartype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
- :ivar request_settings: Request settings for the deployment.
- :vartype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
- :ivar scale_settings: Scale settings for the deployment.
- If it is null or not provided,
- it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
- and to DefaultScaleSettings for ManagedOnlineDeployment.
- :vartype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
- :ivar container_resource_requirements: The resource requirements for the container (cpu and
- memory).
- :vartype container_resource_requirements:
- ~azure.mgmt.machinelearningservices.models.ContainerResourceRequirements
+ :ivar dataset_language: Dataset language, useful for the text data.
+ :vartype dataset_language: str
"""
- _validation = {
- "endpoint_compute_type": {"required": True},
- "provisioning_state": {"readonly": True},
+ _attribute_map = {
+ "dataset_language": {"key": "datasetLanguage", "type": "str"},
}
+ def __init__(self, *, dataset_language: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword dataset_language: Dataset language, useful for the text data.
+ :paramtype dataset_language: str
+ """
+ super().__init__(dataset_language=dataset_language, **kwargs)
+
+
+class NlpVerticalLimitSettings(_serialization.Model):
+ """Job execution constraints.
+
+ :ivar max_concurrent_trials: Maximum concurrent AutoML iterations.
+ :vartype max_concurrent_trials: int
+ :ivar max_nodes: Maximum nodes to use for the experiment.
+ :vartype max_nodes: int
+ :ivar max_trials: Number of AutoML iterations.
+ :vartype max_trials: int
+ :ivar timeout: AutoML job timeout.
+ :vartype timeout: ~datetime.timedelta
+ :ivar trial_timeout: Timeout for individual HD trials.
+ :vartype trial_timeout: ~datetime.timedelta
+ """
+
_attribute_map = {
- "code_configuration": {"key": "codeConfiguration", "type": "CodeConfiguration"},
- "description": {"key": "description", "type": "str"},
- "environment_id": {"key": "environmentId", "type": "str"},
- "environment_variables": {"key": "environmentVariables", "type": "{str}"},
- "properties": {"key": "properties", "type": "{str}"},
- "app_insights_enabled": {"key": "appInsightsEnabled", "type": "bool"},
- "egress_public_network_access": {"key": "egressPublicNetworkAccess", "type": "str"},
- "endpoint_compute_type": {"key": "endpointComputeType", "type": "str"},
- "instance_type": {"key": "instanceType", "type": "str"},
- "liveness_probe": {"key": "livenessProbe", "type": "ProbeSettings"},
- "model": {"key": "model", "type": "str"},
- "model_mount_path": {"key": "modelMountPath", "type": "str"},
- "provisioning_state": {"key": "provisioningState", "type": "str"},
- "readiness_probe": {"key": "readinessProbe", "type": "ProbeSettings"},
- "request_settings": {"key": "requestSettings", "type": "OnlineRequestSettings"},
- "scale_settings": {"key": "scaleSettings", "type": "OnlineScaleSettings"},
- "container_resource_requirements": {
- "key": "containerResourceRequirements",
- "type": "ContainerResourceRequirements",
- },
+ "max_concurrent_trials": {"key": "maxConcurrentTrials", "type": "int"},
+ "max_nodes": {"key": "maxNodes", "type": "int"},
+ "max_trials": {"key": "maxTrials", "type": "int"},
+ "timeout": {"key": "timeout", "type": "duration"},
+ "trial_timeout": {"key": "trialTimeout", "type": "duration"},
}
def __init__(
self,
*,
- code_configuration: Optional["_models.CodeConfiguration"] = None,
- description: Optional[str] = None,
- environment_id: Optional[str] = None,
- environment_variables: Optional[Dict[str, str]] = None,
- properties: Optional[Dict[str, str]] = None,
- app_insights_enabled: bool = False,
- egress_public_network_access: Optional[Union[str, "_models.EgressPublicNetworkAccessType"]] = None,
- instance_type: Optional[str] = None,
- liveness_probe: Optional["_models.ProbeSettings"] = None,
- model: Optional[str] = None,
- model_mount_path: Optional[str] = None,
- readiness_probe: Optional["_models.ProbeSettings"] = None,
- request_settings: Optional["_models.OnlineRequestSettings"] = None,
- scale_settings: Optional["_models.OnlineScaleSettings"] = None,
- container_resource_requirements: Optional["_models.ContainerResourceRequirements"] = None,
+ max_concurrent_trials: int = 1,
+ max_nodes: int = 1,
+ max_trials: int = 1,
+ timeout: datetime.timedelta = datetime.timedelta(days=7),
+ trial_timeout: Optional[datetime.timedelta] = None,
**kwargs: Any
) -> None:
"""
- :keyword code_configuration: Code configuration for the endpoint deployment.
- :paramtype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
- :keyword description: Description of the endpoint deployment.
- :paramtype description: str
- :keyword environment_id: ARM resource ID or AssetId of the environment specification for the
- endpoint deployment.
- :paramtype environment_id: str
- :keyword environment_variables: Environment variables configuration for the deployment.
- :paramtype environment_variables: dict[str, str]
- :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
- :paramtype properties: dict[str, str]
- :keyword app_insights_enabled: If true, enables Application Insights logging.
- :paramtype app_insights_enabled: bool
- :keyword egress_public_network_access: If Enabled, allow egress public network access. If
- Disabled, this will create secure egress. Default: Enabled. Known values are: "Enabled" and
- "Disabled".
- :paramtype egress_public_network_access: str or
- ~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
- :keyword instance_type: Compute instance type.
- :paramtype instance_type: str
- :keyword liveness_probe: Liveness probe monitors the health of the container regularly.
- :paramtype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
- :keyword model: The URI path to the model.
- :paramtype model: str
- :keyword model_mount_path: The path to mount the model in custom container.
- :paramtype model_mount_path: str
- :keyword readiness_probe: Readiness probe validates if the container is ready to serve traffic.
- The properties and defaults are the same as liveness probe.
- :paramtype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
- :keyword request_settings: Request settings for the deployment.
- :paramtype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
- :keyword scale_settings: Scale settings for the deployment.
- If it is null or not provided,
- it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
- and to DefaultScaleSettings for ManagedOnlineDeployment.
- :paramtype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
- :keyword container_resource_requirements: The resource requirements for the container (cpu and
- memory).
- :paramtype container_resource_requirements:
- ~azure.mgmt.machinelearningservices.models.ContainerResourceRequirements
+ :keyword max_concurrent_trials: Maximum Concurrent AutoML iterations.
+ :paramtype max_concurrent_trials: int
+ :keyword max_nodes: Maximum nodes to use for the experiment.
+ :paramtype max_nodes: int
+ :keyword max_trials: Number of AutoML iterations.
+ :paramtype max_trials: int
+ :keyword timeout: AutoML job timeout.
+ :paramtype timeout: ~datetime.timedelta
+ :keyword trial_timeout: Timeout for individual HD trials.
+ :paramtype trial_timeout: ~datetime.timedelta
"""
- super().__init__(
- code_configuration=code_configuration,
- description=description,
- environment_id=environment_id,
- environment_variables=environment_variables,
- properties=properties,
- app_insights_enabled=app_insights_enabled,
- egress_public_network_access=egress_public_network_access,
- instance_type=instance_type,
- liveness_probe=liveness_probe,
- model=model,
- model_mount_path=model_mount_path,
- readiness_probe=readiness_probe,
- request_settings=request_settings,
- scale_settings=scale_settings,
- **kwargs
- )
- self.endpoint_compute_type: str = "Kubernetes"
- self.container_resource_requirements = container_resource_requirements
+ super().__init__(**kwargs)
+ self.max_concurrent_trials = max_concurrent_trials
+ self.max_nodes = max_nodes
+ self.max_trials = max_trials
+ self.timeout = timeout
+ self.trial_timeout = trial_timeout
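Note: the timeout and trial_timeout fields above use the serializer's "duration" type, i.e. ISO 8601 duration strings on the wire (the seven-day default is "P7D"). A minimal sketch of that mapping, assuming the isodate package used by the generated _serialization module is installed alongside the SDK:

import datetime
import isodate  # assumption: installed as a dependency of the generated SDK's serializer

# "P7D" is the ISO 8601 form of a seven-day duration; timedelta values round-trip through it.
assert isodate.parse_duration("P7D") == datetime.timedelta(days=7)
assert isodate.duration_isoformat(datetime.timedelta(days=7)) == "P7D"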
+
+
+class NodeStateCounts(_serialization.Model):
+ """Counts of various compute node states on the amlCompute.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar idle_node_count: Number of compute nodes in idle state.
+ :vartype idle_node_count: int
+ :ivar running_node_count: Number of compute nodes which are running jobs.
+ :vartype running_node_count: int
+ :ivar preparing_node_count: Number of compute nodes which are being prepared.
+ :vartype preparing_node_count: int
+ :ivar unusable_node_count: Number of compute nodes which are in unusable state.
+ :vartype unusable_node_count: int
+ :ivar leaving_node_count: Number of compute nodes which are leaving the amlCompute.
+ :vartype leaving_node_count: int
+ :ivar preempted_node_count: Number of compute nodes which are in preempted state.
+ :vartype preempted_node_count: int
+ """
+
+ _validation = {
+ "idle_node_count": {"readonly": True},
+ "running_node_count": {"readonly": True},
+ "preparing_node_count": {"readonly": True},
+ "unusable_node_count": {"readonly": True},
+ "leaving_node_count": {"readonly": True},
+ "preempted_node_count": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "idle_node_count": {"key": "idleNodeCount", "type": "int"},
+ "running_node_count": {"key": "runningNodeCount", "type": "int"},
+ "preparing_node_count": {"key": "preparingNodeCount", "type": "int"},
+ "unusable_node_count": {"key": "unusableNodeCount", "type": "int"},
+ "leaving_node_count": {"key": "leavingNodeCount", "type": "int"},
+ "preempted_node_count": {"key": "preemptedNodeCount", "type": "int"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.idle_node_count = None
+ self.running_node_count = None
+ self.preparing_node_count = None
+ self.unusable_node_count = None
+ self.leaving_node_count = None
+ self.preempted_node_count = None
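All NodeStateCounts fields are read-only and populated from service responses. A minimal sketch of how such a payload maps onto the model, assuming the deserialize classmethod inherited from the _serialization.Model base and an illustrative payload:

from azure.mgmt.machinelearningservices import models as _models

# Wire keys follow the _attribute_map above (camelCase); the values are illustrative.
raw = {
    "idleNodeCount": 2,
    "runningNodeCount": 3,
    "preparingNodeCount": 1,
    "unusableNodeCount": 0,
    "leavingNodeCount": 0,
    "preemptedNodeCount": 0,
}
counts = _models.NodeStateCounts.deserialize(raw)
print(counts.running_node_count)  # 3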
-class KubernetesProperties(_serialization.Model):
- """Kubernetes properties.
+class NoneAuthTypeWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2):
+ """NoneAuthTypeWorkspaceConnectionProperties.
- :ivar relay_connection_string: Relay connection string.
- :vartype relay_connection_string: str
- :ivar service_bus_connection_string: ServiceBus connection string.
- :vartype service_bus_connection_string: str
- :ivar extension_principal_id: Extension principal-id.
- :vartype extension_principal_id: str
- :ivar extension_instance_release_train: Extension instance release train.
- :vartype extension_instance_release_train: str
- :ivar vc_name: VC name.
- :vartype vc_name: str
- :ivar namespace: Compute namespace.
- :vartype namespace: str
- :ivar default_instance_type: Default instance type.
- :vartype default_instance_type: str
- :ivar instance_types: Instance Type Schema.
- :vartype instance_types: dict[str,
- ~azure.mgmt.machinelearningservices.models.InstanceTypeSchema]
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar auth_type: Authentication type of the connection target. Required. Known values are:
+ "PAT", "ManagedIdentity", "UsernamePassword", "None", "SAS", "ServicePrincipal", "AccessKey",
+ "ApiKey", and "CustomKeys".
+ :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
+ :ivar category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI",
+ "CognitiveSearch", "CognitiveService", and "CustomKeys".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :ivar created_by_workspace_arm_id: The arm id of the workspace which created this connection.
+ :vartype created_by_workspace_arm_id: str
+ :ivar expiry_time:
+ :vartype expiry_time: ~datetime.datetime
+ :ivar is_shared_to_all: Whether this connection will be shared to all project workspaces
+ under the hub.
+ :vartype is_shared_to_all: bool
+ :ivar metadata: Any object.
+ :vartype metadata: JSON
+ :ivar target:
+ :vartype target: str
"""
+ _validation = {
+ "auth_type": {"required": True},
+ "created_by_workspace_arm_id": {"readonly": True},
+ }
+
_attribute_map = {
- "relay_connection_string": {"key": "relayConnectionString", "type": "str"},
- "service_bus_connection_string": {"key": "serviceBusConnectionString", "type": "str"},
- "extension_principal_id": {"key": "extensionPrincipalId", "type": "str"},
- "extension_instance_release_train": {"key": "extensionInstanceReleaseTrain", "type": "str"},
- "vc_name": {"key": "vcName", "type": "str"},
- "namespace": {"key": "namespace", "type": "str"},
- "default_instance_type": {"key": "defaultInstanceType", "type": "str"},
- "instance_types": {"key": "instanceTypes", "type": "{InstanceTypeSchema}"},
+ "auth_type": {"key": "authType", "type": "str"},
+ "category": {"key": "category", "type": "str"},
+ "created_by_workspace_arm_id": {"key": "createdByWorkspaceArmId", "type": "str"},
+ "expiry_time": {"key": "expiryTime", "type": "iso-8601"},
+ "is_shared_to_all": {"key": "isSharedToAll", "type": "bool"},
+ "metadata": {"key": "metadata", "type": "object"},
+ "target": {"key": "target", "type": "str"},
}
def __init__(
self,
*,
- relay_connection_string: Optional[str] = None,
- service_bus_connection_string: Optional[str] = None,
- extension_principal_id: Optional[str] = None,
- extension_instance_release_train: Optional[str] = None,
- vc_name: Optional[str] = None,
- namespace: str = "default",
- default_instance_type: Optional[str] = None,
- instance_types: Optional[Dict[str, "_models.InstanceTypeSchema"]] = None,
+ category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
+ expiry_time: Optional[datetime.datetime] = None,
+ is_shared_to_all: Optional[bool] = None,
+ metadata: Optional[JSON] = None,
+ target: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword relay_connection_string: Relay connection string.
- :paramtype relay_connection_string: str
- :keyword service_bus_connection_string: ServiceBus connection string.
- :paramtype service_bus_connection_string: str
- :keyword extension_principal_id: Extension principal-id.
- :paramtype extension_principal_id: str
- :keyword extension_instance_release_train: Extension instance release train.
- :paramtype extension_instance_release_train: str
- :keyword vc_name: VC name.
- :paramtype vc_name: str
- :keyword namespace: Compute namespace.
- :paramtype namespace: str
- :keyword default_instance_type: Default instance type.
- :paramtype default_instance_type: str
- :keyword instance_types: Instance Type Schema.
- :paramtype instance_types: dict[str,
- ~azure.mgmt.machinelearningservices.models.InstanceTypeSchema]
+ :keyword category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI",
+ "CognitiveSearch", "CognitiveService", and "CustomKeys".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :keyword expiry_time:
+ :paramtype expiry_time: ~datetime.datetime
+ :keyword is_shared_to_all: Whether this connection will be shared to all project workspaces
+ under the hub.
+ :paramtype is_shared_to_all: bool
+ :keyword metadata: Any object.
+ :paramtype metadata: JSON
+ :keyword target:
+ :paramtype target: str
"""
- super().__init__(**kwargs)
- self.relay_connection_string = relay_connection_string
- self.service_bus_connection_string = service_bus_connection_string
- self.extension_principal_id = extension_principal_id
- self.extension_instance_release_train = extension_instance_release_train
- self.vc_name = vc_name
- self.namespace = namespace
- self.default_instance_type = default_instance_type
- self.instance_types = instance_types
+ super().__init__(
+ category=category,
+ expiry_time=expiry_time,
+ is_shared_to_all=is_shared_to_all,
+ metadata=metadata,
+ target=target,
+ **kwargs
+ )
+ self.auth_type: str = "None"
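A minimal construction sketch for a connection that carries no credentials; the category and target values are illustrative only:

from azure.mgmt.machinelearningservices import models as _models

props = _models.NoneAuthTypeWorkspaceConnectionProperties(
    category="ContainerRegistry",          # one of the ConnectionCategory known values
    target="https://example.azurecr.io",   # illustrative target
)
assert props.auth_type == "None"  # fixed by the subclass, not passed by the caller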
-class ListAmlUserFeatureResult(_serialization.Model):
- """The List Aml user feature operation response.
+class NoneDatastoreCredentials(DatastoreCredentials):
+ """Empty/none datastore credentials.
- Variables are only populated by the server, and will be ignored when sending a request.
+ All required parameters must be populated in order to send to Azure.
- :ivar value: The list of AML user facing features.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.AmlUserFeature]
- :ivar next_link: The URI to fetch the next page of AML user features information. Call
- ListNext() with this to fetch the next page of AML user features information.
- :vartype next_link: str
+ :ivar credentials_type: [Required] Credential type used for authentication with storage.
+ Required. Known values are: "AccountKey", "Certificate", "None", "Sas", "ServicePrincipal",
+ "KerberosKeytab", and "KerberosPassword".
+ :vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
"""
_validation = {
- "value": {"readonly": True},
- "next_link": {"readonly": True},
+ "credentials_type": {"required": True},
}
_attribute_map = {
- "value": {"key": "value", "type": "[AmlUserFeature]"},
- "next_link": {"key": "nextLink", "type": "str"},
+ "credentials_type": {"key": "credentialsType", "type": "str"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
- self.value = None
- self.next_link = None
+ self.credentials_type: str = "None"
-class ListNotebookKeysResult(_serialization.Model):
- """ListNotebookKeysResult.
+class NotebookAccessTokenResult(_serialization.Model):
+ """NotebookAccessTokenResult.
Variables are only populated by the server, and will be ignored when sending a request.
- :ivar primary_access_key:
- :vartype primary_access_key: str
- :ivar secondary_access_key:
- :vartype secondary_access_key: str
+ :ivar access_token:
+ :vartype access_token: str
+ :ivar expires_in:
+ :vartype expires_in: int
+ :ivar host_name:
+ :vartype host_name: str
+ :ivar notebook_resource_id:
+ :vartype notebook_resource_id: str
+ :ivar public_dns:
+ :vartype public_dns: str
+ :ivar refresh_token:
+ :vartype refresh_token: str
+ :ivar scope:
+ :vartype scope: str
+ :ivar token_type:
+ :vartype token_type: str
"""
_validation = {
- "primary_access_key": {"readonly": True},
- "secondary_access_key": {"readonly": True},
+ "access_token": {"readonly": True},
+ "expires_in": {"readonly": True},
+ "host_name": {"readonly": True},
+ "notebook_resource_id": {"readonly": True},
+ "public_dns": {"readonly": True},
+ "refresh_token": {"readonly": True},
+ "scope": {"readonly": True},
+ "token_type": {"readonly": True},
}
_attribute_map = {
- "primary_access_key": {"key": "primaryAccessKey", "type": "str"},
- "secondary_access_key": {"key": "secondaryAccessKey", "type": "str"},
+ "access_token": {"key": "accessToken", "type": "str"},
+ "expires_in": {"key": "expiresIn", "type": "int"},
+ "host_name": {"key": "hostName", "type": "str"},
+ "notebook_resource_id": {"key": "notebookResourceId", "type": "str"},
+ "public_dns": {"key": "publicDns", "type": "str"},
+ "refresh_token": {"key": "refreshToken", "type": "str"},
+ "scope": {"key": "scope", "type": "str"},
+ "token_type": {"key": "tokenType", "type": "str"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
- self.primary_access_key = None
- self.secondary_access_key = None
-
+ self.access_token = None
+ self.expires_in = None
+ self.host_name = None
+ self.notebook_resource_id = None
+ self.public_dns = None
+ self.refresh_token = None
+ self.scope = None
+ self.token_type = None
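NotebookAccessTokenResult is purely server-populated. A hedged sketch of obtaining one, assuming the workspaces operation group still exposes list_notebook_access_token (as in prior versions of this SDK) and that azure-identity is available:

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="<subscription-id>",
)
token = client.workspaces.list_notebook_access_token("<resource-group>", "<workspace-name>")
print(token.host_name, token.expires_in)  # read-only fields filled in by the service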
-class ListStorageAccountKeysResult(_serialization.Model):
- """ListStorageAccountKeysResult.
- Variables are only populated by the server, and will be ignored when sending a request.
+class NotebookPreparationError(_serialization.Model):
+ """NotebookPreparationError.
- :ivar user_storage_key:
- :vartype user_storage_key: str
+ :ivar error_message:
+ :vartype error_message: str
+ :ivar status_code:
+ :vartype status_code: int
"""
- _validation = {
- "user_storage_key": {"readonly": True},
- }
-
_attribute_map = {
- "user_storage_key": {"key": "userStorageKey", "type": "str"},
+ "error_message": {"key": "errorMessage", "type": "str"},
+ "status_code": {"key": "statusCode", "type": "int"},
}
- def __init__(self, **kwargs: Any) -> None:
- """ """
+ def __init__(
+ self, *, error_message: Optional[str] = None, status_code: Optional[int] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword error_message:
+ :paramtype error_message: str
+ :keyword status_code:
+ :paramtype status_code: int
+ """
super().__init__(**kwargs)
- self.user_storage_key = None
-
+ self.error_message = error_message
+ self.status_code = status_code
-class ListUsagesResult(_serialization.Model):
- """The List Usages operation response.
- Variables are only populated by the server, and will be ignored when sending a request.
+class NotebookResourceInfo(_serialization.Model):
+ """NotebookResourceInfo.
- :ivar value: The list of AML resource usages.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.Usage]
- :ivar next_link: The URI to fetch the next page of AML resource usage information. Call
- ListNext() with this to fetch the next page of AML resource usage information.
- :vartype next_link: str
+ :ivar fqdn:
+ :vartype fqdn: str
+ :ivar is_private_link_enabled:
+ :vartype is_private_link_enabled: bool
+ :ivar notebook_preparation_error: The error that occurs when preparing the notebook.
+ :vartype notebook_preparation_error:
+ ~azure.mgmt.machinelearningservices.models.NotebookPreparationError
+ :ivar resource_id: The data plane resourceId used to initialize the notebook component.
+ :vartype resource_id: str
"""
- _validation = {
- "value": {"readonly": True},
- "next_link": {"readonly": True},
+ _attribute_map = {
+ "fqdn": {"key": "fqdn", "type": "str"},
+ "is_private_link_enabled": {"key": "isPrivateLinkEnabled", "type": "bool"},
+ "notebook_preparation_error": {"key": "notebookPreparationError", "type": "NotebookPreparationError"},
+ "resource_id": {"key": "resourceId", "type": "str"},
}
+ def __init__(
+ self,
+ *,
+ fqdn: Optional[str] = None,
+ is_private_link_enabled: Optional[bool] = None,
+ notebook_preparation_error: Optional["_models.NotebookPreparationError"] = None,
+ resource_id: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword fqdn:
+ :paramtype fqdn: str
+ :keyword is_private_link_enabled:
+ :paramtype is_private_link_enabled: bool
+ :keyword notebook_preparation_error: The error that occurs when preparing the notebook.
+ :paramtype notebook_preparation_error:
+ ~azure.mgmt.machinelearningservices.models.NotebookPreparationError
+ :keyword resource_id: The data plane resourceId used to initialize the notebook component.
+ :paramtype resource_id: str
+ """
+ super().__init__(**kwargs)
+ self.fqdn = fqdn
+ self.is_private_link_enabled = is_private_link_enabled
+ self.notebook_preparation_error = notebook_preparation_error
+ self.resource_id = resource_id
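A small sketch showing how the preparation error nests inside NotebookResourceInfo; all values are illustrative:

from azure.mgmt.machinelearningservices import models as _models

info = _models.NotebookResourceInfo(
    fqdn="ml-example.notebooks.azure.net",   # illustrative
    is_private_link_enabled=False,
    notebook_preparation_error=_models.NotebookPreparationError(
        error_message="storage account not reachable",  # illustrative
        status_code=403,
    ),
)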
+
+
+class NotificationSetting(_serialization.Model):
+ """Configuration for notification.
+
+ :ivar email_on: Send email notification to user on specified notification type.
+ :vartype email_on: list[str or
+ ~azure.mgmt.machinelearningservices.models.EmailNotificationEnableType]
+ :ivar emails: The email recipient list, limited to 499 characters in total when
+ concatenated with a comma separator.
+ :vartype emails: list[str]
+ :ivar webhooks: Send webhook callback to a service. Key is a user-provided name for the
+ webhook.
+ :vartype webhooks: dict[str, ~azure.mgmt.machinelearningservices.models.Webhook]
+ """
+
_attribute_map = {
- "value": {"key": "value", "type": "[Usage]"},
- "next_link": {"key": "nextLink", "type": "str"},
+ "email_on": {"key": "emailOn", "type": "[str]"},
+ "emails": {"key": "emails", "type": "[str]"},
+ "webhooks": {"key": "webhooks", "type": "{Webhook}"},
}
- def __init__(self, **kwargs: Any) -> None:
- """ """
+ def __init__(
+ self,
+ *,
+ email_on: Optional[List[Union[str, "_models.EmailNotificationEnableType"]]] = None,
+ emails: Optional[List[str]] = None,
+ webhooks: Optional[Dict[str, "_models.Webhook"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword email_on: Send email notification to user on specified notification type.
+ :paramtype email_on: list[str or
+ ~azure.mgmt.machinelearningservices.models.EmailNotificationEnableType]
+ :keyword emails: The email recipient list, limited to 499 characters in total when
+ concatenated with a comma separator.
+ :paramtype emails: list[str]
+ :keyword webhooks: Send webhook callback to a service. Key is a user-provided name for the
+ webhook.
+ :paramtype webhooks: dict[str, ~azure.mgmt.machinelearningservices.models.Webhook]
+ """
super().__init__(**kwargs)
- self.value = None
- self.next_link = None
+ self.email_on = email_on
+ self.emails = emails
+ self.webhooks = webhooks
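A construction sketch for email-based notifications; the recipient and notification types are illustrative, and the webhooks dict (keyed by a user-chosen name) is omitted for brevity:

from azure.mgmt.machinelearningservices import models as _models

notification = _models.NotificationSetting(
    email_on=["JobFailed", "JobCompleted"],  # assumed EmailNotificationEnableType values
    emails=["ml-team@example.com"],          # the whole list is limited to 499 characters
)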
-class ListWorkspaceKeysResult(_serialization.Model):
- """ListWorkspaceKeysResult.
+class NumericalDataDriftMetricThreshold(DataDriftMetricThresholdBase):
+ """NumericalDataDriftMetricThreshold.
- Variables are only populated by the server, and will be ignored when sending a request.
+ All required parameters must be populated in order to send to Azure.
- :ivar user_storage_key:
- :vartype user_storage_key: str
- :ivar user_storage_resource_id:
- :vartype user_storage_resource_id: str
- :ivar app_insights_instrumentation_key:
- :vartype app_insights_instrumentation_key: str
- :ivar container_registry_credentials:
- :vartype container_registry_credentials:
- ~azure.mgmt.machinelearningservices.models.RegistryListCredentialsResult
- :ivar notebook_access_keys:
- :vartype notebook_access_keys:
- ~azure.mgmt.machinelearningservices.models.ListNotebookKeysResult
+ :ivar data_type: [Required] Specifies the data type of the metric threshold. Required. Known
+ values are: "Numerical" and "Categorical".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType
+ :ivar threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :ivar metric: [Required] The numerical data drift metric to calculate. Required. Known values
+ are: "JensenShannonDistance", "PopulationStabilityIndex", "NormalizedWassersteinDistance", and
+ "TwoSampleKolmogorovSmirnovTest".
+ :vartype metric: str or ~azure.mgmt.machinelearningservices.models.NumericalDataDriftMetric
"""
_validation = {
- "user_storage_key": {"readonly": True},
- "user_storage_resource_id": {"readonly": True},
- "app_insights_instrumentation_key": {"readonly": True},
- "container_registry_credentials": {"readonly": True},
- "notebook_access_keys": {"readonly": True},
+ "data_type": {"required": True},
+ "metric": {"required": True},
}
_attribute_map = {
- "user_storage_key": {"key": "userStorageKey", "type": "str"},
- "user_storage_resource_id": {"key": "userStorageResourceId", "type": "str"},
- "app_insights_instrumentation_key": {"key": "appInsightsInstrumentationKey", "type": "str"},
- "container_registry_credentials": {
- "key": "containerRegistryCredentials",
- "type": "RegistryListCredentialsResult",
- },
- "notebook_access_keys": {"key": "notebookAccessKeys", "type": "ListNotebookKeysResult"},
+ "data_type": {"key": "dataType", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
+ "metric": {"key": "metric", "type": "str"},
}
- def __init__(self, **kwargs: Any) -> None:
- """ """
- super().__init__(**kwargs)
- self.user_storage_key = None
- self.user_storage_resource_id = None
- self.app_insights_instrumentation_key = None
- self.container_registry_credentials = None
- self.notebook_access_keys = None
+ def __init__(
+ self,
+ *,
+ metric: Union[str, "_models.NumericalDataDriftMetric"],
+ threshold: Optional["_models.MonitoringThreshold"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :keyword metric: [Required] The numerical data drift metric to calculate. Required. Known
+ values are: "JensenShannonDistance", "PopulationStabilityIndex",
+ "NormalizedWassersteinDistance", and "TwoSampleKolmogorovSmirnovTest".
+ :paramtype metric: str or ~azure.mgmt.machinelearningservices.models.NumericalDataDriftMetric
+ """
+ super().__init__(threshold=threshold, **kwargs)
+ self.data_type: str = "Numerical"
+ self.metric = metric
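A sketch of a numerical drift threshold; the same shape applies to the data-quality and prediction-drift variants below, which differ only in the metric enum. MonitoringThreshold is assumed to accept a numeric value keyword:

from azure.mgmt.machinelearningservices import models as _models

drift_threshold = _models.NumericalDataDriftMetricThreshold(
    metric="NormalizedWassersteinDistance",
    threshold=_models.MonitoringThreshold(value=0.1),  # omit to let the service pick a default
)
assert drift_threshold.data_type == "Numerical"  # set by the subclass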
-class ListWorkspaceQuotas(_serialization.Model):
- """The List WorkspaceQuotasByVMFamily operation response.
+class NumericalDataQualityMetricThreshold(DataQualityMetricThresholdBase):
+ """NumericalDataQualityMetricThreshold.
- Variables are only populated by the server, and will be ignored when sending a request.
+ All required parameters must be populated in order to send to Azure.
- :ivar value: The list of Workspace Quotas by VM Family.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.ResourceQuota]
- :ivar next_link: The URI to fetch the next page of workspace quota information by VM Family.
- Call ListNext() with this to fetch the next page of Workspace Quota information.
- :vartype next_link: str
+ :ivar data_type: [Required] Specifies the data type of the metric threshold. Required. Known
+ values are: "Numerical" and "Categorical".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType
+ :ivar threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :ivar metric: [Required] The numerical data quality metric to calculate. Required. Known values
+ are: "NullValueRate", "DataTypeErrorRate", and "OutOfBoundsRate".
+ :vartype metric: str or ~azure.mgmt.machinelearningservices.models.NumericalDataQualityMetric
"""
_validation = {
- "value": {"readonly": True},
- "next_link": {"readonly": True},
+ "data_type": {"required": True},
+ "metric": {"required": True},
}
_attribute_map = {
- "value": {"key": "value", "type": "[ResourceQuota]"},
- "next_link": {"key": "nextLink", "type": "str"},
+ "data_type": {"key": "dataType", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
+ "metric": {"key": "metric", "type": "str"},
}
- def __init__(self, **kwargs: Any) -> None:
- """ """
- super().__init__(**kwargs)
- self.value = None
- self.next_link = None
+ def __init__(
+ self,
+ *,
+ metric: Union[str, "_models.NumericalDataQualityMetric"],
+ threshold: Optional["_models.MonitoringThreshold"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :keyword metric: [Required] The numerical data quality metric to calculate. Required. Known
+ values are: "NullValueRate", "DataTypeErrorRate", and "OutOfBoundsRate".
+ :paramtype metric: str or ~azure.mgmt.machinelearningservices.models.NumericalDataQualityMetric
+ """
+ super().__init__(threshold=threshold, **kwargs)
+ self.data_type: str = "Numerical"
+ self.metric = metric
-class LiteralJobInput(JobInput):
- """Literal input type.
+class NumericalPredictionDriftMetricThreshold(PredictionDriftMetricThresholdBase):
+ """NumericalPredictionDriftMetricThreshold.
All required parameters must be populated in order to send to Azure.
- :ivar description: Description for the input.
- :vartype description: str
- :ivar job_input_type: [Required] Specifies the type of job. Required. Known values are:
- "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
- "triton_model".
- :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
- :ivar value: [Required] Literal value for the input. Required.
- :vartype value: str
+ :ivar data_type: [Required] Specifies the data type of the metric threshold. Required. Known
+ values are: "Numerical" and "Categorical".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType
+ :ivar threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :ivar metric: [Required] The numerical prediction drift metric to calculate. Required. Known
+ values are: "JensenShannonDistance", "PopulationStabilityIndex",
+ "NormalizedWassersteinDistance", and "TwoSampleKolmogorovSmirnovTest".
+ :vartype metric: str or
+ ~azure.mgmt.machinelearningservices.models.NumericalPredictionDriftMetric
"""
_validation = {
- "job_input_type": {"required": True},
- "value": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "data_type": {"required": True},
+ "metric": {"required": True},
}
_attribute_map = {
- "description": {"key": "description", "type": "str"},
- "job_input_type": {"key": "jobInputType", "type": "str"},
- "value": {"key": "value", "type": "str"},
+ "data_type": {"key": "dataType", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
+ "metric": {"key": "metric", "type": "str"},
}
- def __init__(self, *, value: str, description: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ metric: Union[str, "_models.NumericalPredictionDriftMetric"],
+ threshold: Optional["_models.MonitoringThreshold"] = None,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword description: Description for the input.
- :paramtype description: str
- :keyword value: [Required] Literal value for the input. Required.
- :paramtype value: str
+ :keyword threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :keyword metric: [Required] The numerical prediction drift metric to calculate. Required. Known
+ values are: "JensenShannonDistance", "PopulationStabilityIndex",
+ "NormalizedWassersteinDistance", and "TwoSampleKolmogorovSmirnovTest".
+ :paramtype metric: str or
+ ~azure.mgmt.machinelearningservices.models.NumericalPredictionDriftMetric
"""
- super().__init__(description=description, **kwargs)
- self.job_input_type: str = "literal"
- self.value = value
+ super().__init__(threshold=threshold, **kwargs)
+ self.data_type: str = "Numerical"
+ self.metric = metric
-class ManagedIdentity(IdentityConfiguration):
- """Managed identity configuration.
+class Objective(_serialization.Model):
+ """Optimization objective.
All required parameters must be populated in order to send to Azure.
- :ivar identity_type: [Required] Specifies the type of identity framework. Required. Known
- values are: "Managed", "AMLToken", and "UserIdentity".
- :vartype identity_type: str or
- ~azure.mgmt.machinelearningservices.models.IdentityConfigurationType
- :ivar client_id: Specifies a user-assigned identity by client ID. For system-assigned, do not
- set this field.
- :vartype client_id: str
- :ivar object_id: Specifies a user-assigned identity by object ID. For system-assigned, do not
- set this field.
- :vartype object_id: str
- :ivar resource_id: Specifies a user-assigned identity by ARM resource ID. For system-assigned,
- do not set this field.
- :vartype resource_id: str
+ :ivar goal: [Required] Defines supported metric goals for hyperparameter tuning. Required.
+ Known values are: "Minimize" and "Maximize".
+ :vartype goal: str or ~azure.mgmt.machinelearningservices.models.Goal
+ :ivar primary_metric: [Required] Name of the metric to optimize. Required.
+ :vartype primary_metric: str
"""
_validation = {
- "identity_type": {"required": True},
+ "goal": {"required": True},
+ "primary_metric": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
}
_attribute_map = {
- "identity_type": {"key": "identityType", "type": "str"},
- "client_id": {"key": "clientId", "type": "str"},
- "object_id": {"key": "objectId", "type": "str"},
- "resource_id": {"key": "resourceId", "type": "str"},
+ "goal": {"key": "goal", "type": "str"},
+ "primary_metric": {"key": "primaryMetric", "type": "str"},
}
- def __init__(
- self,
- *,
- client_id: Optional[str] = None,
- object_id: Optional[str] = None,
- resource_id: Optional[str] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, goal: Union[str, "_models.Goal"], primary_metric: str, **kwargs: Any) -> None:
"""
- :keyword client_id: Specifies a user-assigned identity by client ID. For system-assigned, do
- not set this field.
- :paramtype client_id: str
- :keyword object_id: Specifies a user-assigned identity by object ID. For system-assigned, do
- not set this field.
- :paramtype object_id: str
- :keyword resource_id: Specifies a user-assigned identity by ARM resource ID. For
- system-assigned, do not set this field.
- :paramtype resource_id: str
+ :keyword goal: [Required] Defines supported metric goals for hyperparameter tuning. Required.
+ Known values are: "Minimize" and "Maximize".
+ :paramtype goal: str or ~azure.mgmt.machinelearningservices.models.Goal
+ :keyword primary_metric: [Required] Name of the metric to optimize. Required.
+ :paramtype primary_metric: str
"""
super().__init__(**kwargs)
- self.identity_type: str = "Managed"
- self.client_id = client_id
- self.object_id = object_id
- self.resource_id = resource_id
+ self.goal = goal
+ self.primary_metric = primary_metric
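A minimal Objective for a sweep or AutoML job; the metric name is illustrative:

from azure.mgmt.machinelearningservices import models as _models

objective = _models.Objective(
    goal="Maximize",               # Goal known values: "Minimize" and "Maximize"
    primary_metric="AUC_weighted", # illustrative metric name
)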
-class WorkspaceConnectionPropertiesV2(_serialization.Model):
- """WorkspaceConnectionPropertiesV2.
+class OneLakeDatastore(DatastoreProperties): # pylint: disable=too-many-instance-attributes
+ """OneLake (Trident) datastore configuration.
- You probably want to use the sub-classes and not this class directly. Known sub-classes are:
- ManagedIdentityAuthTypeWorkspaceConnectionProperties,
- NoneAuthTypeWorkspaceConnectionProperties, PATAuthTypeWorkspaceConnectionProperties,
- SASAuthTypeWorkspaceConnectionProperties, UsernamePasswordAuthTypeWorkspaceConnectionProperties
+ Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
- :ivar auth_type: Authentication type of the connection target. Required. Known values are:
- "PAT", "ManagedIdentity", "UsernamePassword", "None", and "SAS".
- :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
- :ivar category: Category of the connection. Known values are: "PythonFeed",
- "ContainerRegistry", and "Git".
- :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
- :ivar target:
- :vartype target: str
- :ivar value: Value details of the workspace connection.
- :vartype value: str
- :ivar value_format: format for the workspace connection value. "JSON"
- :vartype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar credentials: [Required] Account credentials. Required.
+ :vartype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
+ :ivar datastore_type: [Required] Storage type backing the datastore. Required. Known values
+ are: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2", "AzureFile", "Hdfs", and "OneLake".
+ :vartype datastore_type: str or ~azure.mgmt.machinelearningservices.models.DatastoreType
+ :ivar intellectual_property: Intellectual Property details.
+ :vartype intellectual_property: ~azure.mgmt.machinelearningservices.models.IntellectualProperty
+ :ivar is_default: Readonly property to indicate if datastore is the workspace default
+ datastore.
+ :vartype is_default: bool
+ :ivar artifact: [Required] OneLake artifact backing the datastore. Required.
+ :vartype artifact: ~azure.mgmt.machinelearningservices.models.OneLakeArtifact
+ :ivar endpoint: OneLake endpoint to use for the datastore.
+ :vartype endpoint: str
+ :ivar one_lake_workspace_name: [Required] OneLake workspace name. Required.
+ :vartype one_lake_workspace_name: str
+ :ivar service_data_access_auth_identity: Indicates which identity to use to authenticate
+ service data access to customer's storage. Known values are: "None",
+ "WorkspaceSystemAssignedIdentity", and "WorkspaceUserAssignedIdentity".
+ :vartype service_data_access_auth_identity: str or
+ ~azure.mgmt.machinelearningservices.models.ServiceDataAccessAuthIdentity
"""
_validation = {
- "auth_type": {"required": True},
+ "credentials": {"required": True},
+ "datastore_type": {"required": True},
+ "is_default": {"readonly": True},
+ "artifact": {"required": True},
+ "one_lake_workspace_name": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
}
_attribute_map = {
- "auth_type": {"key": "authType", "type": "str"},
- "category": {"key": "category", "type": "str"},
- "target": {"key": "target", "type": "str"},
- "value": {"key": "value", "type": "str"},
- "value_format": {"key": "valueFormat", "type": "str"},
- }
-
- _subtype_map = {
- "auth_type": {
- "ManagedIdentity": "ManagedIdentityAuthTypeWorkspaceConnectionProperties",
- "None": "NoneAuthTypeWorkspaceConnectionProperties",
- "PAT": "PATAuthTypeWorkspaceConnectionProperties",
- "SAS": "SASAuthTypeWorkspaceConnectionProperties",
- "UsernamePassword": "UsernamePasswordAuthTypeWorkspaceConnectionProperties",
- }
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "credentials": {"key": "credentials", "type": "DatastoreCredentials"},
+ "datastore_type": {"key": "datastoreType", "type": "str"},
+ "intellectual_property": {"key": "intellectualProperty", "type": "IntellectualProperty"},
+ "is_default": {"key": "isDefault", "type": "bool"},
+ "artifact": {"key": "artifact", "type": "OneLakeArtifact"},
+ "endpoint": {"key": "endpoint", "type": "str"},
+ "one_lake_workspace_name": {"key": "oneLakeWorkspaceName", "type": "str"},
+ "service_data_access_auth_identity": {"key": "serviceDataAccessAuthIdentity", "type": "str"},
}
def __init__(
self,
*,
- category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
- target: Optional[str] = None,
- value: Optional[str] = None,
- value_format: Optional[Union[str, "_models.ValueFormat"]] = None,
+ credentials: "_models.DatastoreCredentials",
+ artifact: "_models.OneLakeArtifact",
+ one_lake_workspace_name: str,
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ intellectual_property: Optional["_models.IntellectualProperty"] = None,
+ endpoint: Optional[str] = None,
+ service_data_access_auth_identity: Optional[Union[str, "_models.ServiceDataAccessAuthIdentity"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword category: Category of the connection. Known values are: "PythonFeed",
- "ContainerRegistry", and "Git".
- :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
- :keyword target:
- :paramtype target: str
- :keyword value: Value details of the workspace connection.
- :paramtype value: str
- :keyword value_format: format for the workspace connection value. "JSON"
- :paramtype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword credentials: [Required] Account credentials. Required.
+ :paramtype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
+ :keyword intellectual_property: Intellectual Property details.
+ :paramtype intellectual_property:
+ ~azure.mgmt.machinelearningservices.models.IntellectualProperty
+ :keyword artifact: [Required] OneLake artifact backing the datastore. Required.
+ :paramtype artifact: ~azure.mgmt.machinelearningservices.models.OneLakeArtifact
+ :keyword endpoint: OneLake endpoint to use for the datastore.
+ :paramtype endpoint: str
+ :keyword one_lake_workspace_name: [Required] OneLake workspace name. Required.
+ :paramtype one_lake_workspace_name: str
+ :keyword service_data_access_auth_identity: Indicates which identity to use to authenticate
+ service data access to customer's storage. Known values are: "None",
+ "WorkspaceSystemAssignedIdentity", and "WorkspaceUserAssignedIdentity".
+ :paramtype service_data_access_auth_identity: str or
+ ~azure.mgmt.machinelearningservices.models.ServiceDataAccessAuthIdentity
"""
- super().__init__(**kwargs)
- self.auth_type: Optional[str] = None
- self.category = category
- self.target = target
- self.value = value
- self.value_format = value_format
+ super().__init__(
+ description=description,
+ properties=properties,
+ tags=tags,
+ credentials=credentials,
+ intellectual_property=intellectual_property,
+ **kwargs
+ )
+ self.datastore_type: str = "OneLake"
+ self.artifact = artifact
+ self.endpoint = endpoint
+ self.one_lake_workspace_name = one_lake_workspace_name
+ self.service_data_access_auth_identity = service_data_access_auth_identity
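A construction sketch for a OneLake datastore, reusing the NoneDatastoreCredentials shown earlier in this hunk; LakeHouseArtifact is assumed to be the concrete OneLakeArtifact subtype in this API version, and all names are illustrative:

from azure.mgmt.machinelearningservices import models as _models

datastore = _models.OneLakeDatastore(
    credentials=_models.NoneDatastoreCredentials(),
    artifact=_models.LakeHouseArtifact(artifact_name="my-lakehouse"),  # assumed subtype and keyword
    one_lake_workspace_name="my-onelake-workspace",
    description="Example OneLake datastore",
)
assert datastore.datastore_type == "OneLake"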
-class ManagedIdentityAuthTypeWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2):
- """ManagedIdentityAuthTypeWorkspaceConnectionProperties.
+class OnlineDeployment(TrackedResource):
+ """OnlineDeployment.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
- :ivar auth_type: Authentication type of the connection target. Required. Known values are:
- "PAT", "ManagedIdentity", "UsernamePassword", "None", and "SAS".
- :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
- :ivar category: Category of the connection. Known values are: "PythonFeed",
- "ContainerRegistry", and "Git".
- :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
- :ivar target:
- :vartype target: str
- :ivar value: Value details of the workspace connection.
- :vartype value: str
- :ivar value_format: format for the workspace connection value. "JSON"
- :vartype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
- :ivar credentials:
- :vartype credentials:
- ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionManagedIdentity
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar tags: Resource tags.
+ :vartype tags: dict[str, str]
+ :ivar location: The geo-location where the resource lives. Required.
+ :vartype location: str
+ :ivar identity: Managed service identity (system assigned and/or user assigned identities).
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :ivar kind: Metadata used by portal/tooling/etc to render different UX experiences for
+ resources of the same type.
+ :vartype kind: str
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.OnlineDeploymentProperties
+ :ivar sku: Sku details required for ARM contract for Autoscaling.
+ :vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
_validation = {
- "auth_type": {"required": True},
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "location": {"required": True},
+ "properties": {"required": True},
}
_attribute_map = {
- "auth_type": {"key": "authType", "type": "str"},
- "category": {"key": "category", "type": "str"},
- "target": {"key": "target", "type": "str"},
- "value": {"key": "value", "type": "str"},
- "value_format": {"key": "valueFormat", "type": "str"},
- "credentials": {"key": "credentials", "type": "WorkspaceConnectionManagedIdentity"},
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "location": {"key": "location", "type": "str"},
+ "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
+ "kind": {"key": "kind", "type": "str"},
+ "properties": {"key": "properties", "type": "OnlineDeploymentProperties"},
+ "sku": {"key": "sku", "type": "Sku"},
}
def __init__(
self,
*,
- category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
- target: Optional[str] = None,
- value: Optional[str] = None,
- value_format: Optional[Union[str, "_models.ValueFormat"]] = None,
- credentials: Optional["_models.WorkspaceConnectionManagedIdentity"] = None,
+ location: str,
+ properties: "_models.OnlineDeploymentProperties",
+ tags: Optional[Dict[str, str]] = None,
+ identity: Optional["_models.ManagedServiceIdentity"] = None,
+ kind: Optional[str] = None,
+ sku: Optional["_models.Sku"] = None,
**kwargs: Any
) -> None:
"""
- :keyword category: Category of the connection. Known values are: "PythonFeed",
- "ContainerRegistry", and "Git".
- :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
- :keyword target:
- :paramtype target: str
- :keyword value: Value details of the workspace connection.
- :paramtype value: str
- :keyword value_format: format for the workspace connection value. "JSON"
- :paramtype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
- :keyword credentials:
- :paramtype credentials:
- ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionManagedIdentity
+ :keyword tags: Resource tags.
+ :paramtype tags: dict[str, str]
+ :keyword location: The geo-location where the resource lives. Required.
+ :paramtype location: str
+ :keyword identity: Managed service identity (system assigned and/or user assigned identities).
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :keyword kind: Metadata used by portal/tooling/etc to render different UX experiences for
+ resources of the same type.
+ :paramtype kind: str
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.OnlineDeploymentProperties
+ :keyword sku: Sku details required for ARM contract for Autoscaling.
+ :paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
- super().__init__(category=category, target=target, value=value, value_format=value_format, **kwargs)
- self.auth_type: str = "ManagedIdentity"
- self.credentials = credentials
-
-
-class ManagedOnlineDeployment(OnlineDeploymentProperties): # pylint: disable=too-many-instance-attributes
- """Properties specific to a ManagedOnlineDeployment.
+ super().__init__(tags=tags, location=location, **kwargs)
+ self.identity = identity
+ self.kind = kind
+ self.properties = properties
+ self.sku = sku
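A sketch of the ARM wrapper around deployment properties; ManagedOnlineDeployment (defined elsewhere in this module) supplies the required properties, and the location, instance type, model reference, and Sku values are illustrative:

from azure.mgmt.machinelearningservices import models as _models

deployment = _models.OnlineDeployment(
    location="eastus",
    properties=_models.ManagedOnlineDeployment(
        instance_type="Standard_DS3_v2",  # illustrative
        model="azureml:my-model:1",       # illustrative model reference
    ),
    sku=_models.Sku(name="Default", capacity=1),  # Sku.name is required; capacity assumed keyword
)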
- Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+class OnlineDeploymentTrackedResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of OnlineDeployment entities.
- :ivar code_configuration: Code configuration for the endpoint deployment.
- :vartype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
- :ivar description: Description of the endpoint deployment.
- :vartype description: str
- :ivar environment_id: ARM resource ID or AssetId of the environment specification for the
- endpoint deployment.
- :vartype environment_id: str
- :ivar environment_variables: Environment variables configuration for the deployment.
- :vartype environment_variables: dict[str, str]
- :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
- :vartype properties: dict[str, str]
- :ivar app_insights_enabled: If true, enables Application Insights logging.
- :vartype app_insights_enabled: bool
- :ivar egress_public_network_access: If Enabled, allow egress public network access. If
- Disabled, this will create secure egress. Default: Enabled. Known values are: "Enabled" and
- "Disabled".
- :vartype egress_public_network_access: str or
- ~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
- :ivar endpoint_compute_type: [Required] The compute type of the endpoint. Required. Known
- values are: "Managed", "Kubernetes", and "AzureMLCompute".
- :vartype endpoint_compute_type: str or
- ~azure.mgmt.machinelearningservices.models.EndpointComputeType
- :ivar instance_type: Compute instance type.
- :vartype instance_type: str
- :ivar liveness_probe: Liveness probe monitors the health of the container regularly.
- :vartype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
- :ivar model: The URI path to the model.
- :vartype model: str
- :ivar model_mount_path: The path to mount the model in custom container.
- :vartype model_mount_path: str
- :ivar provisioning_state: Provisioning state for the endpoint deployment. Known values are:
- "Creating", "Deleting", "Scaling", "Updating", "Succeeded", "Failed", and "Canceled".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.DeploymentProvisioningState
- :ivar readiness_probe: Readiness probe validates if the container is ready to serve traffic.
- The properties and defaults are the same as liveness probe.
- :vartype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
- :ivar request_settings: Request settings for the deployment.
- :vartype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
- :ivar scale_settings: Scale settings for the deployment.
- If it is null or not provided,
- it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
- and to DefaultScaleSettings for ManagedOnlineDeployment.
- :vartype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
+ :ivar next_link: The link to the next page of OnlineDeployment objects. If null, there are no
+ additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type OnlineDeployment.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.OnlineDeployment]
"""
- _validation = {
- "endpoint_compute_type": {"required": True},
- "provisioning_state": {"readonly": True},
- }
-
_attribute_map = {
- "code_configuration": {"key": "codeConfiguration", "type": "CodeConfiguration"},
- "description": {"key": "description", "type": "str"},
- "environment_id": {"key": "environmentId", "type": "str"},
- "environment_variables": {"key": "environmentVariables", "type": "{str}"},
- "properties": {"key": "properties", "type": "{str}"},
- "app_insights_enabled": {"key": "appInsightsEnabled", "type": "bool"},
- "egress_public_network_access": {"key": "egressPublicNetworkAccess", "type": "str"},
- "endpoint_compute_type": {"key": "endpointComputeType", "type": "str"},
- "instance_type": {"key": "instanceType", "type": "str"},
- "liveness_probe": {"key": "livenessProbe", "type": "ProbeSettings"},
- "model": {"key": "model", "type": "str"},
- "model_mount_path": {"key": "modelMountPath", "type": "str"},
- "provisioning_state": {"key": "provisioningState", "type": "str"},
- "readiness_probe": {"key": "readinessProbe", "type": "ProbeSettings"},
- "request_settings": {"key": "requestSettings", "type": "OnlineRequestSettings"},
- "scale_settings": {"key": "scaleSettings", "type": "OnlineScaleSettings"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[OnlineDeployment]"},
}
def __init__(
self,
*,
- code_configuration: Optional["_models.CodeConfiguration"] = None,
- description: Optional[str] = None,
- environment_id: Optional[str] = None,
- environment_variables: Optional[Dict[str, str]] = None,
- properties: Optional[Dict[str, str]] = None,
- app_insights_enabled: bool = False,
- egress_public_network_access: Optional[Union[str, "_models.EgressPublicNetworkAccessType"]] = None,
- instance_type: Optional[str] = None,
- liveness_probe: Optional["_models.ProbeSettings"] = None,
- model: Optional[str] = None,
- model_mount_path: Optional[str] = None,
- readiness_probe: Optional["_models.ProbeSettings"] = None,
- request_settings: Optional["_models.OnlineRequestSettings"] = None,
- scale_settings: Optional["_models.OnlineScaleSettings"] = None,
+ next_link: Optional[str] = None,
+ value: Optional[List["_models.OnlineDeployment"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword code_configuration: Code configuration for the endpoint deployment.
- :paramtype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
- :keyword description: Description of the endpoint deployment.
- :paramtype description: str
- :keyword environment_id: ARM resource ID or AssetId of the environment specification for the
- endpoint deployment.
- :paramtype environment_id: str
- :keyword environment_variables: Environment variables configuration for the deployment.
- :paramtype environment_variables: dict[str, str]
- :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
- :paramtype properties: dict[str, str]
- :keyword app_insights_enabled: If true, enables Application Insights logging.
- :paramtype app_insights_enabled: bool
- :keyword egress_public_network_access: If Enabled, allow egress public network access. If
- Disabled, this will create secure egress. Default: Enabled. Known values are: "Enabled" and
- "Disabled".
- :paramtype egress_public_network_access: str or
- ~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
- :keyword instance_type: Compute instance type.
- :paramtype instance_type: str
- :keyword liveness_probe: Liveness probe monitors the health of the container regularly.
- :paramtype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
- :keyword model: The URI path to the model.
- :paramtype model: str
- :keyword model_mount_path: The path to mount the model in custom container.
- :paramtype model_mount_path: str
- :keyword readiness_probe: Readiness probe validates if the container is ready to serve traffic.
- The properties and defaults are the same as liveness probe.
- :paramtype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
- :keyword request_settings: Request settings for the deployment.
- :paramtype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
- :keyword scale_settings: Scale settings for the deployment.
- If it is null or not provided,
- it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
- and to DefaultScaleSettings for ManagedOnlineDeployment.
- :paramtype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
- """
- super().__init__(
- code_configuration=code_configuration,
- description=description,
- environment_id=environment_id,
- environment_variables=environment_variables,
- properties=properties,
- app_insights_enabled=app_insights_enabled,
- egress_public_network_access=egress_public_network_access,
- instance_type=instance_type,
- liveness_probe=liveness_probe,
- model=model,
- model_mount_path=model_mount_path,
- readiness_probe=readiness_probe,
- request_settings=request_settings,
- scale_settings=scale_settings,
- **kwargs
- )
- self.endpoint_compute_type: str = "Managed"
+ :keyword next_link: The link to the next page of OnlineDeployment objects. If null, there are
+ no additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type OnlineDeployment.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.OnlineDeployment]
+ """
+ super().__init__(**kwargs)
+ self.next_link = next_link
+ self.value = value
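Callers normally do not build the paginated result themselves; the operations group returns it as an iterable of pages. A hedged sketch, assuming online_deployments.list keeps its (resource group, workspace, endpoint) signature and azure-identity is available:

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="<subscription-id>",
)
# Each page deserializes into OnlineDeploymentTrackedResourceArmPaginatedResult;
# the pager follows next_link automatically.
for deployment in client.online_deployments.list(
    "<resource-group>", "<workspace-name>", "<endpoint-name>"
):
    print(deployment.name, deployment.properties.provisioning_state)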
-class ManagedServiceIdentity(_serialization.Model):
- """Managed service identity (system assigned and/or user assigned identities).
+class OnlineEndpoint(TrackedResource):
+ """OnlineEndpoint.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
- :ivar principal_id: The service principal ID of the system assigned identity. This property
- will only be provided for a system assigned identity.
- :vartype principal_id: str
- :ivar tenant_id: The tenant ID of the system assigned identity. This property will only be
- provided for a system assigned identity.
- :vartype tenant_id: str
- :ivar type: Type of managed service identity (where both SystemAssigned and UserAssigned types
- are allowed). Required. Known values are: "None", "SystemAssigned", "UserAssigned", and
- "SystemAssigned,UserAssigned".
- :vartype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
- :ivar user_assigned_identities: The set of user assigned identities associated with the
- resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
- '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}.
- The dictionary values can be empty objects ({}) in requests.
- :vartype user_assigned_identities: dict[str,
- ~azure.mgmt.machinelearningservices.models.UserAssignedIdentity]
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar tags: Resource tags.
+ :vartype tags: dict[str, str]
+ :ivar location: The geo-location where the resource lives. Required.
+ :vartype location: str
+ :ivar identity: Managed service identity (system assigned and/or user assigned identities).
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :ivar kind: Metadata used by portal/tooling/etc to render different UX experiences for
+ resources of the same type.
+ :vartype kind: str
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.OnlineEndpointProperties
+ :ivar sku: Sku details required for ARM contract for Autoscaling.
+ :vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
_validation = {
- "principal_id": {"readonly": True},
- "tenant_id": {"readonly": True},
- "type": {"required": True},
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "location": {"required": True},
+ "properties": {"required": True},
}
_attribute_map = {
- "principal_id": {"key": "principalId", "type": "str"},
- "tenant_id": {"key": "tenantId", "type": "str"},
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
- "user_assigned_identities": {"key": "userAssignedIdentities", "type": "{UserAssignedIdentity}"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "location": {"key": "location", "type": "str"},
+ "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
+ "kind": {"key": "kind", "type": "str"},
+ "properties": {"key": "properties", "type": "OnlineEndpointProperties"},
+ "sku": {"key": "sku", "type": "Sku"},
}
def __init__(
self,
*,
- type: Union[str, "_models.ManagedServiceIdentityType"],
- user_assigned_identities: Optional[Dict[str, "_models.UserAssignedIdentity"]] = None,
+ location: str,
+ properties: "_models.OnlineEndpointProperties",
+ tags: Optional[Dict[str, str]] = None,
+ identity: Optional["_models.ManagedServiceIdentity"] = None,
+ kind: Optional[str] = None,
+ sku: Optional["_models.Sku"] = None,
**kwargs: Any
) -> None:
"""
- :keyword type: Type of managed service identity (where both SystemAssigned and UserAssigned
- types are allowed). Required. Known values are: "None", "SystemAssigned", "UserAssigned", and
- "SystemAssigned,UserAssigned".
- :paramtype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
- :keyword user_assigned_identities: The set of user assigned identities associated with the
- resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
- '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}.
- The dictionary values can be empty objects ({}) in requests.
- :paramtype user_assigned_identities: dict[str,
- ~azure.mgmt.machinelearningservices.models.UserAssignedIdentity]
+ :keyword tags: Resource tags.
+ :paramtype tags: dict[str, str]
+ :keyword location: The geo-location where the resource lives. Required.
+ :paramtype location: str
+ :keyword identity: Managed service identity (system assigned and/or user assigned identities).
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :keyword kind: Metadata used by portal/tooling/etc to render different UX experiences for
+ resources of the same type.
+ :paramtype kind: str
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.OnlineEndpointProperties
+ :keyword sku: Sku details required for ARM contract for Autoscaling.
+ :paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
- super().__init__(**kwargs)
- self.principal_id = None
- self.tenant_id = None
- self.type = type
- self.user_assigned_identities = user_assigned_identities
+ super().__init__(tags=tags, location=location, **kwargs)
+ self.identity = identity
+ self.kind = kind
+ self.properties = properties
+ self.sku = sku
-class MedianStoppingPolicy(EarlyTerminationPolicy):
- """Defines an early termination policy based on running averages of the primary metric of all
- runs.
+class OnlineEndpointProperties(EndpointPropertiesBase): # pylint: disable=too-many-instance-attributes
+ """Online endpoint configuration.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
- :ivar delay_evaluation: Number of intervals by which to delay the first evaluation.
- :vartype delay_evaluation: int
- :ivar evaluation_interval: Interval (number of runs) between policy evaluations.
- :vartype evaluation_interval: int
- :ivar policy_type: [Required] Name of policy configuration. Required. Known values are:
- "Bandit", "MedianStopping", and "TruncationSelection".
- :vartype policy_type: str or
- ~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicyType
+ :ivar auth_mode: [Required] Use 'Key' for key based authentication and 'AMLToken' for Azure
+ Machine Learning token-based authentication. 'Key' doesn't expire but 'AMLToken' does.
+ Required. Known values are: "AMLToken", "Key", and "AADToken".
+ :vartype auth_mode: str or ~azure.mgmt.machinelearningservices.models.EndpointAuthMode
+ :ivar description: Description of the inference endpoint.
+ :vartype description: str
+ :ivar keys: EndpointAuthKeys to set initially on an Endpoint.
+ This property will always be returned as null. AuthKey values must be retrieved using the
+ ListKeys API.
+ :vartype keys: ~azure.mgmt.machinelearningservices.models.EndpointAuthKeys
+ :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
+ :vartype properties: dict[str, str]
+ :ivar scoring_uri: Endpoint URI.
+ :vartype scoring_uri: str
+ :ivar swagger_uri: Endpoint Swagger URI.
+ :vartype swagger_uri: str
+ :ivar compute: ARM resource ID of the compute if it exists. Optional.
+ :vartype compute: str
+ :ivar mirror_traffic: Percentage of traffic to be mirrored to each deployment without using
+ returned scoring. Traffic values need to sum to at most 50.
+ :vartype mirror_traffic: dict[str, int]
+ :ivar provisioning_state: Provisioning state for the endpoint. Known values are: "Creating",
+ "Deleting", "Succeeded", "Failed", "Updating", and "Canceled".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.EndpointProvisioningState
+ :ivar public_network_access: Set to "Enabled" for endpoints that should allow public access
+ when Private Link is enabled. Known values are: "Enabled" and "Disabled".
+ :vartype public_network_access: str or
+ ~azure.mgmt.machinelearningservices.models.PublicNetworkAccessType
+ :ivar traffic: Percentage of traffic from endpoint to divert to each deployment. Traffic values
+ need to sum to 100.
+ :vartype traffic: dict[str, int]
"""
_validation = {
- "policy_type": {"required": True},
+ "auth_mode": {"required": True},
+ "scoring_uri": {"readonly": True},
+ "swagger_uri": {"readonly": True},
+ "provisioning_state": {"readonly": True},
}
_attribute_map = {
- "delay_evaluation": {"key": "delayEvaluation", "type": "int"},
- "evaluation_interval": {"key": "evaluationInterval", "type": "int"},
- "policy_type": {"key": "policyType", "type": "str"},
+ "auth_mode": {"key": "authMode", "type": "str"},
+ "description": {"key": "description", "type": "str"},
+ "keys": {"key": "keys", "type": "EndpointAuthKeys"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "scoring_uri": {"key": "scoringUri", "type": "str"},
+ "swagger_uri": {"key": "swaggerUri", "type": "str"},
+ "compute": {"key": "compute", "type": "str"},
+ "mirror_traffic": {"key": "mirrorTraffic", "type": "{int}"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "public_network_access": {"key": "publicNetworkAccess", "type": "str"},
+ "traffic": {"key": "traffic", "type": "{int}"},
}
- def __init__(self, *, delay_evaluation: int = 0, evaluation_interval: int = 0, **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ auth_mode: Union[str, "_models.EndpointAuthMode"],
+ description: Optional[str] = None,
+ keys: Optional["_models.EndpointAuthKeys"] = None,
+ properties: Optional[Dict[str, str]] = None,
+ compute: Optional[str] = None,
+ mirror_traffic: Optional[Dict[str, int]] = None,
+ public_network_access: Optional[Union[str, "_models.PublicNetworkAccessType"]] = None,
+ traffic: Optional[Dict[str, int]] = None,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword delay_evaluation: Number of intervals by which to delay the first evaluation.
- :paramtype delay_evaluation: int
- :keyword evaluation_interval: Interval (number of runs) between policy evaluations.
- :paramtype evaluation_interval: int
+ :keyword auth_mode: [Required] Use 'Key' for key based authentication and 'AMLToken' for Azure
+ Machine Learning token-based authentication. 'Key' doesn't expire but 'AMLToken' does.
+ Required. Known values are: "AMLToken", "Key", and "AADToken".
+ :paramtype auth_mode: str or ~azure.mgmt.machinelearningservices.models.EndpointAuthMode
+ :keyword description: Description of the inference endpoint.
+ :paramtype description: str
+ :keyword keys: EndpointAuthKeys to set initially on an Endpoint.
+ This property will always be returned as null. AuthKey values must be retrieved using the
+ ListKeys API.
+ :paramtype keys: ~azure.mgmt.machinelearningservices.models.EndpointAuthKeys
+ :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
+ :paramtype properties: dict[str, str]
+ :keyword compute: ARM resource ID of the compute if it exists. Optional.
+ :paramtype compute: str
+ :keyword mirror_traffic: Percentage of traffic to be mirrored to each deployment without using
+ returned scoring. Traffic values need to sum to at most 50.
+ :paramtype mirror_traffic: dict[str, int]
+ :keyword public_network_access: Set to "Enabled" for endpoints that should allow public access
+ when Private Link is enabled. Known values are: "Enabled" and "Disabled".
+ :paramtype public_network_access: str or
+ ~azure.mgmt.machinelearningservices.models.PublicNetworkAccessType
+ :keyword traffic: Percentage of traffic from endpoint to divert to each deployment. Traffic
+ values need to sum to 100.
+ :paramtype traffic: dict[str, int]
"""
- super().__init__(delay_evaluation=delay_evaluation, evaluation_interval=evaluation_interval, **kwargs)
- self.policy_type: str = "MedianStopping"
+ super().__init__(auth_mode=auth_mode, description=description, keys=keys, properties=properties, **kwargs)
+ self.compute = compute
+ self.mirror_traffic = mirror_traffic
+ self.provisioning_state = None
+ self.public_network_access = public_network_access
+ self.traffic = traffic
+
+
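The OnlineEndpoint and OnlineEndpointProperties models introduced above are plain serialization models, so a request body can be composed directly from the constructor keywords shown in this diff. A minimal sketch with placeholder values (the deployment names, region, and tags are illustrative only):

```python
from azure.mgmt.machinelearningservices import models

# Only constructor keywords that appear in this diff are used below.
endpoint_properties = models.OnlineEndpointProperties(
    auth_mode="Key",                    # "Key", "AMLToken", or "AADToken"
    description="Example online endpoint",
    traffic={"blue": 100},              # traffic values must sum to 100
    mirror_traffic={"green": 10},       # mirrored traffic must sum to at most 50
)

endpoint = models.OnlineEndpoint(
    location="eastus",                  # required (TrackedResource)
    properties=endpoint_properties,     # required
    tags={"env": "dev"},
)
```

scoring_uri, swagger_uri, and provisioning_state are read-only and stay None until the service returns them.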
+class OnlineEndpointTrackedResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of OnlineEndpoint entities.
+
+ :ivar next_link: The link to the next page of OnlineEndpoint objects. If null, there are no
+ additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type OnlineEndpoint.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.OnlineEndpoint]
+ """
+
+ _attribute_map = {
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[OnlineEndpoint]"},
+ }
+ def __init__(
+ self, *, next_link: Optional[str] = None, value: Optional[List["_models.OnlineEndpoint"]] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword next_link: The link to the next page of OnlineEndpoint objects. If null, there are no
+ additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type OnlineEndpoint.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.OnlineEndpoint]
+ """
+ super().__init__(**kwargs)
+ self.next_link = next_link
+ self.value = value
-class MLFlowModelJobInput(AssetJobInput, JobInput):
- """MLFlowModelJobInput.
- All required parameters must be populated in order to send to Azure.
+class OnlineInferenceConfiguration(_serialization.Model):
+ """Online inference configuration options.
- :ivar description: Description for the input.
- :vartype description: str
- :ivar job_input_type: [Required] Specifies the type of job. Required. Known values are:
- "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
- "triton_model".
- :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
- :ivar mode: Input Asset Delivery Mode. Known values are: "ReadOnlyMount", "ReadWriteMount",
- "Download", "Direct", "EvalMount", and "EvalDownload".
- :vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
- :ivar uri: [Required] Input Asset URI. Required.
- :vartype uri: str
+ :ivar configurations: Additional configurations.
+ :vartype configurations: dict[str, str]
+ :ivar entry_script: Entry script or command to invoke.
+ :vartype entry_script: str
+ :ivar liveness_route: The route to check the liveness of the inference server container.
+ :vartype liveness_route: ~azure.mgmt.machinelearningservices.models.Route
+ :ivar readiness_route: The route to check the readiness of the inference server container.
+ :vartype readiness_route: ~azure.mgmt.machinelearningservices.models.Route
+ :ivar scoring_route: The route to send the scoring requests to, within the inference server
+ container.
+ :vartype scoring_route: ~azure.mgmt.machinelearningservices.models.Route
"""
- _validation = {
- "job_input_type": {"required": True},
- "uri": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
- }
-
_attribute_map = {
- "description": {"key": "description", "type": "str"},
- "job_input_type": {"key": "jobInputType", "type": "str"},
- "mode": {"key": "mode", "type": "str"},
- "uri": {"key": "uri", "type": "str"},
+ "configurations": {"key": "configurations", "type": "{str}"},
+ "entry_script": {"key": "entryScript", "type": "str"},
+ "liveness_route": {"key": "livenessRoute", "type": "Route"},
+ "readiness_route": {"key": "readinessRoute", "type": "Route"},
+ "scoring_route": {"key": "scoringRoute", "type": "Route"},
}
def __init__(
self,
*,
- uri: str,
- description: Optional[str] = None,
- mode: Optional[Union[str, "_models.InputDeliveryMode"]] = None,
+ configurations: Optional[Dict[str, str]] = None,
+ entry_script: Optional[str] = None,
+ liveness_route: Optional["_models.Route"] = None,
+ readiness_route: Optional["_models.Route"] = None,
+ scoring_route: Optional["_models.Route"] = None,
**kwargs: Any
) -> None:
"""
- :keyword description: Description for the input.
- :paramtype description: str
- :keyword mode: Input Asset Delivery Mode. Known values are: "ReadOnlyMount", "ReadWriteMount",
- "Download", "Direct", "EvalMount", and "EvalDownload".
- :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
- :keyword uri: [Required] Input Asset URI. Required.
- :paramtype uri: str
+ :keyword configurations: Additional configurations.
+ :paramtype configurations: dict[str, str]
+ :keyword entry_script: Entry script or command to invoke.
+ :paramtype entry_script: str
+ :keyword liveness_route: The route to check the liveness of the inference server container.
+ :paramtype liveness_route: ~azure.mgmt.machinelearningservices.models.Route
+ :keyword readiness_route: The route to check the readiness of the inference server container.
+ :paramtype readiness_route: ~azure.mgmt.machinelearningservices.models.Route
+ :keyword scoring_route: The route to send the scoring requests to, within the inference
+ server container.
+ :paramtype scoring_route: ~azure.mgmt.machinelearningservices.models.Route
"""
- super().__init__(mode=mode, uri=uri, description=description, **kwargs)
- self.description = description
- self.job_input_type: str = "mlflow_model"
- self.mode = mode
- self.uri = uri
-
+ super().__init__(**kwargs)
+ self.configurations = configurations
+ self.entry_script = entry_script
+ self.liveness_route = liveness_route
+ self.readiness_route = readiness_route
+ self.scoring_route = scoring_route
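OnlineInferenceConfiguration is essentially a container for the probe and scoring routes plus free-form settings. A brief sketch of filling it in; note that the Route constructor used here (path and port keywords) is an assumption, since only the type name appears in this excerpt, and the entry script is a placeholder:

```python
from azure.mgmt.machinelearningservices import models

inference_config = models.OnlineInferenceConfiguration(
    entry_script="score.py",                                   # hypothetical entry script
    liveness_route=models.Route(path="/health", port=8080),    # Route signature assumed
    readiness_route=models.Route(path="/ready", port=8080),
    scoring_route=models.Route(path="/score", port=8080),
    configurations={"WORKER_COUNT": "2"},                      # arbitrary extra settings
)
```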
-class MLFlowModelJobOutput(AssetJobOutput, JobOutput):
- """MLFlowModelJobOutput.
- All required parameters must be populated in order to send to Azure.
+class OnlineRequestSettings(_serialization.Model):
+ """Online deployment scoring requests configuration.
- :ivar description: Description for the output.
- :vartype description: str
- :ivar job_output_type: [Required] Specifies the type of job. Required. Known values are:
- "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and "triton_model".
- :vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
- :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
- :vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
- :ivar uri: Output Asset URI.
- :vartype uri: str
+ :ivar max_concurrent_requests_per_instance: The maximum number of concurrent requests allowed
+ per node, per deployment. Defaults to 1.
+ :vartype max_concurrent_requests_per_instance: int
+ :ivar max_queue_wait: The maximum amount of time a request will stay in the queue in ISO 8601
+ format.
+ Defaults to 500ms.
+ :vartype max_queue_wait: ~datetime.timedelta
+ :ivar request_timeout: The scoring timeout in ISO 8601 format.
+ Defaults to 5000ms.
+ :vartype request_timeout: ~datetime.timedelta
"""
- _validation = {
- "job_output_type": {"required": True},
- }
-
_attribute_map = {
- "description": {"key": "description", "type": "str"},
- "job_output_type": {"key": "jobOutputType", "type": "str"},
- "mode": {"key": "mode", "type": "str"},
- "uri": {"key": "uri", "type": "str"},
+ "max_concurrent_requests_per_instance": {"key": "maxConcurrentRequestsPerInstance", "type": "int"},
+ "max_queue_wait": {"key": "maxQueueWait", "type": "duration"},
+ "request_timeout": {"key": "requestTimeout", "type": "duration"},
}
def __init__(
self,
*,
- description: Optional[str] = None,
- mode: Optional[Union[str, "_models.OutputDeliveryMode"]] = None,
- uri: Optional[str] = None,
+ max_concurrent_requests_per_instance: int = 1,
+ max_queue_wait: datetime.timedelta = "PT0.5S",
+ request_timeout: datetime.timedelta = "PT5S",
**kwargs: Any
) -> None:
"""
- :keyword description: Description for the output.
- :paramtype description: str
- :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
- :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
- :keyword uri: Output Asset URI.
- :paramtype uri: str
+ :keyword max_concurrent_requests_per_instance: The maximum number of concurrent requests
+ allowed per node, per deployment. Defaults to 1.
+ :paramtype max_concurrent_requests_per_instance: int
+ :keyword max_queue_wait: The maximum amount of time a request will stay in the queue in ISO
+ 8601 format.
+ Defaults to 500ms.
+ :paramtype max_queue_wait: ~datetime.timedelta
+ :keyword request_timeout: The scoring timeout in ISO 8601 format.
+ Defaults to 5000ms.
+ :paramtype request_timeout: ~datetime.timedelta
"""
- super().__init__(mode=mode, uri=uri, description=description, **kwargs)
- self.description = description
- self.job_output_type: str = "mlflow_model"
- self.mode = mode
- self.uri = uri
+ super().__init__(**kwargs)
+ self.max_concurrent_requests_per_instance = max_concurrent_requests_per_instance
+ self.max_queue_wait = max_queue_wait
+ self.request_timeout = request_timeout
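One quirk of the generated OnlineRequestSettings signature above is that the parameters are annotated as datetime.timedelta while the defaults are the service-side ISO 8601 strings ("PT0.5S", "PT5S"). When overriding them it is natural to pass real timedelta objects, which the "duration" serializer turns back into ISO 8601. A minimal sketch:

```python
import datetime

from azure.mgmt.machinelearningservices import models

request_settings = models.OnlineRequestSettings(
    max_concurrent_requests_per_instance=4,
    max_queue_wait=datetime.timedelta(seconds=1),    # serialized as "PT1S"
    request_timeout=datetime.timedelta(seconds=30),  # serialized as "PT30S"
)
```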
-class MLTableData(DataVersionBaseProperties):
- """MLTable data definition.
+class Operation(_serialization.Model):
+ """Details of a REST API operation, returned from the Resource Provider Operations API.
- All required parameters must be populated in order to send to Azure.
+ Variables are only populated by the server, and will be ignored when sending a request.
- :ivar description: The asset description text.
- :vartype description: str
- :ivar properties: The asset property dictionary.
- :vartype properties: dict[str, str]
- :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
- :vartype tags: dict[str, str]
- :ivar is_anonymous: If the name version are system generated (anonymous registration).
- :vartype is_anonymous: bool
- :ivar is_archived: Is the asset archived?.
- :vartype is_archived: bool
- :ivar data_type: [Required] Specifies the type of data. Required. Known values are: "uri_file",
- "uri_folder", and "mltable".
- :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.DataType
- :ivar data_uri: [Required] Uri of the data. Example:
- https://go.microsoft.com/fwlink/?linkid=2202330. Required.
- :vartype data_uri: str
- :ivar referenced_uris: Uris referenced in the MLTable definition (required for lineage).
- :vartype referenced_uris: list[str]
+ :ivar name: The name of the operation, as per Resource-Based Access Control (RBAC). Examples:
+ "Microsoft.Compute/virtualMachines/write", "Microsoft.Compute/virtualMachines/capture/action".
+ :vartype name: str
+ :ivar is_data_action: Whether the operation applies to data-plane. This is "true" for
+ data-plane operations and "false" for ARM/control-plane operations.
+ :vartype is_data_action: bool
+ :ivar display: Localized display information for this particular operation.
+ :vartype display: ~azure.mgmt.machinelearningservices.models.OperationDisplay
+ :ivar origin: The intended executor of the operation; as in Resource Based Access Control
+ (RBAC) and audit logs UX. Default value is "user,system". Known values are: "user", "system",
+ and "user,system".
+ :vartype origin: str or ~azure.mgmt.machinelearningservices.models.Origin
+ :ivar action_type: Enum. Indicates the action type. "Internal" refers to actions that are for
+ internal only APIs. Known value is "Internal".
+ :vartype action_type: str or ~azure.mgmt.machinelearningservices.models.ActionType
"""
_validation = {
- "data_type": {"required": True},
- "data_uri": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "name": {"readonly": True},
+ "is_data_action": {"readonly": True},
+ "origin": {"readonly": True},
+ "action_type": {"readonly": True},
}
_attribute_map = {
- "description": {"key": "description", "type": "str"},
- "properties": {"key": "properties", "type": "{str}"},
- "tags": {"key": "tags", "type": "{str}"},
- "is_anonymous": {"key": "isAnonymous", "type": "bool"},
- "is_archived": {"key": "isArchived", "type": "bool"},
- "data_type": {"key": "dataType", "type": "str"},
- "data_uri": {"key": "dataUri", "type": "str"},
- "referenced_uris": {"key": "referencedUris", "type": "[str]"},
+ "name": {"key": "name", "type": "str"},
+ "is_data_action": {"key": "isDataAction", "type": "bool"},
+ "display": {"key": "display", "type": "OperationDisplay"},
+ "origin": {"key": "origin", "type": "str"},
+ "action_type": {"key": "actionType", "type": "str"},
}
- def __init__(
- self,
- *,
- data_uri: str,
- description: Optional[str] = None,
- properties: Optional[Dict[str, str]] = None,
- tags: Optional[Dict[str, str]] = None,
- is_anonymous: bool = False,
- is_archived: bool = False,
- referenced_uris: Optional[List[str]] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, display: Optional["_models.OperationDisplay"] = None, **kwargs: Any) -> None:
"""
- :keyword description: The asset description text.
- :paramtype description: str
- :keyword properties: The asset property dictionary.
- :paramtype properties: dict[str, str]
- :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
- :paramtype tags: dict[str, str]
- :keyword is_anonymous: If the name version are system generated (anonymous registration).
- :paramtype is_anonymous: bool
- :keyword is_archived: Is the asset archived?.
- :paramtype is_archived: bool
- :keyword data_uri: [Required] Uri of the data. Example:
- https://go.microsoft.com/fwlink/?linkid=2202330. Required.
- :paramtype data_uri: str
- :keyword referenced_uris: Uris referenced in the MLTable definition (required for lineage).
- :paramtype referenced_uris: list[str]
+ :keyword display: Localized display information for this particular operation.
+ :paramtype display: ~azure.mgmt.machinelearningservices.models.OperationDisplay
"""
- super().__init__(
- description=description,
- properties=properties,
- tags=tags,
- is_anonymous=is_anonymous,
- is_archived=is_archived,
- data_uri=data_uri,
- **kwargs
- )
- self.data_type: str = "mltable"
- self.referenced_uris = referenced_uris
+ super().__init__(**kwargs)
+ self.name = None
+ self.is_data_action = None
+ self.display = display
+ self.origin = None
+ self.action_type = None
-class MLTableJobInput(AssetJobInput, JobInput):
- """MLTableJobInput.
+class OperationDisplay(_serialization.Model):
+ """Localized display information for this particular operation.
- All required parameters must be populated in order to send to Azure.
+ Variables are only populated by the server, and will be ignored when sending a request.
- :ivar description: Description for the input.
+ :ivar provider: The localized friendly form of the resource provider name, e.g. "Microsoft
+ Monitoring Insights" or "Microsoft Compute".
+ :vartype provider: str
+ :ivar resource: The localized friendly name of the resource type related to this operation.
+ E.g. "Virtual Machines" or "Job Schedule Collections".
+ :vartype resource: str
+ :ivar operation: The concise, localized friendly name for the operation; suitable for
+ dropdowns. E.g. "Create or Update Virtual Machine", "Restart Virtual Machine".
+ :vartype operation: str
+ :ivar description: The short, localized friendly description of the operation; suitable for
+ tool tips and detailed views.
:vartype description: str
- :ivar job_input_type: [Required] Specifies the type of job. Required. Known values are:
- "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
- "triton_model".
- :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
- :ivar mode: Input Asset Delivery Mode. Known values are: "ReadOnlyMount", "ReadWriteMount",
- "Download", "Direct", "EvalMount", and "EvalDownload".
- :vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
- :ivar uri: [Required] Input Asset URI. Required.
- :vartype uri: str
"""
_validation = {
- "job_input_type": {"required": True},
- "uri": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "provider": {"readonly": True},
+ "resource": {"readonly": True},
+ "operation": {"readonly": True},
+ "description": {"readonly": True},
}
_attribute_map = {
- "description": {"key": "description", "type": "str"},
- "job_input_type": {"key": "jobInputType", "type": "str"},
- "mode": {"key": "mode", "type": "str"},
- "uri": {"key": "uri", "type": "str"},
- }
-
- def __init__(
- self,
- *,
- uri: str,
- description: Optional[str] = None,
- mode: Optional[Union[str, "_models.InputDeliveryMode"]] = None,
- **kwargs: Any
- ) -> None:
- """
- :keyword description: Description for the input.
- :paramtype description: str
- :keyword mode: Input Asset Delivery Mode. Known values are: "ReadOnlyMount", "ReadWriteMount",
- "Download", "Direct", "EvalMount", and "EvalDownload".
- :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
- :keyword uri: [Required] Input Asset URI. Required.
- :paramtype uri: str
- """
- super().__init__(mode=mode, uri=uri, description=description, **kwargs)
- self.description = description
- self.job_input_type: str = "mltable"
- self.mode = mode
- self.uri = uri
+ "provider": {"key": "provider", "type": "str"},
+ "resource": {"key": "resource", "type": "str"},
+ "operation": {"key": "operation", "type": "str"},
+ "description": {"key": "description", "type": "str"},
+ }
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.provider = None
+ self.resource = None
+ self.operation = None
+ self.description = None
-class MLTableJobOutput(AssetJobOutput, JobOutput):
- """MLTableJobOutput.
- All required parameters must be populated in order to send to Azure.
+class OperationListResult(_serialization.Model):
+ """A list of REST API operations supported by an Azure Resource Provider. It contains an URL link
+ to get the next set of results.
- :ivar description: Description for the output.
- :vartype description: str
- :ivar job_output_type: [Required] Specifies the type of job. Required. Known values are:
- "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and "triton_model".
- :vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
- :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
- :vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
- :ivar uri: Output Asset URI.
- :vartype uri: str
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar value: List of operations supported by the resource provider.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.Operation]
+ :ivar next_link: URL to get the next set of operation list results (if there are any).
+ :vartype next_link: str
"""
_validation = {
- "job_output_type": {"required": True},
+ "value": {"readonly": True},
+ "next_link": {"readonly": True},
}
_attribute_map = {
- "description": {"key": "description", "type": "str"},
- "job_output_type": {"key": "jobOutputType", "type": "str"},
- "mode": {"key": "mode", "type": "str"},
- "uri": {"key": "uri", "type": "str"},
+ "value": {"key": "value", "type": "[Operation]"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.value = None
+ self.next_link = None
+
+
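Operation, OperationDisplay, and OperationListResult are read-only: their constructors reset every field to None and the values come from the service. A sketch of how they are usually consumed, assuming the management client exposes the standard operations group (client construction is outside this excerpt, and the subscription ID is a placeholder):

```python
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="<subscription-id>",  # placeholder
)

# Each item is an Operation; op.display is an OperationDisplay or None.
for op in client.operations.list():
    description = op.display.description if op.display else ""
    print(op.name, "-", description)
```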
+class OsPatchingStatus(_serialization.Model):
+ """Returns metadata about the os patching.
+
+ :ivar patch_status: The os patching status. Known values are: "CompletedWithWarnings",
+ "Failed", "InProgress", "Succeeded", and "Unknown".
+ :vartype patch_status: str or ~azure.mgmt.machinelearningservices.models.PatchStatus
+ :ivar latest_patch_time: Time of the latest os patching.
+ :vartype latest_patch_time: str
+ :ivar reboot_pending: Specifies whether this compute instance is pending for reboot to finish
+ os patching.
+ :vartype reboot_pending: bool
+ :ivar scheduled_reboot_time: Time of scheduled reboot.
+ :vartype scheduled_reboot_time: str
+ """
+
+ _attribute_map = {
+ "patch_status": {"key": "patchStatus", "type": "str"},
+ "latest_patch_time": {"key": "latestPatchTime", "type": "str"},
+ "reboot_pending": {"key": "rebootPending", "type": "bool"},
+ "scheduled_reboot_time": {"key": "scheduledRebootTime", "type": "str"},
}
def __init__(
self,
*,
- description: Optional[str] = None,
- mode: Optional[Union[str, "_models.OutputDeliveryMode"]] = None,
- uri: Optional[str] = None,
+ patch_status: Optional[Union[str, "_models.PatchStatus"]] = None,
+ latest_patch_time: Optional[str] = None,
+ reboot_pending: Optional[bool] = None,
+ scheduled_reboot_time: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword description: Description for the output.
- :paramtype description: str
- :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
- :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
- :keyword uri: Output Asset URI.
- :paramtype uri: str
+ :keyword patch_status: The os patching status. Known values are: "CompletedWithWarnings",
+ "Failed", "InProgress", "Succeeded", and "Unknown".
+ :paramtype patch_status: str or ~azure.mgmt.machinelearningservices.models.PatchStatus
+ :keyword latest_patch_time: Time of the latest os patching.
+ :paramtype latest_patch_time: str
+ :keyword reboot_pending: Specifies whether this compute instance is pending for reboot to
+ finish os patching.
+ :paramtype reboot_pending: bool
+ :keyword scheduled_reboot_time: Time of scheduled reboot.
+ :paramtype scheduled_reboot_time: str
"""
- super().__init__(mode=mode, uri=uri, description=description, **kwargs)
- self.description = description
- self.job_output_type: str = "mltable"
- self.mode = mode
- self.uri = uri
+ super().__init__(**kwargs)
+ self.patch_status = patch_status
+ self.latest_patch_time = latest_patch_time
+ self.reboot_pending = reboot_pending
+ self.scheduled_reboot_time = scheduled_reboot_time
-class ModelContainer(Resource):
- """Azure Resource Manager resource envelope.
+class OutboundRuleBasicResource(Resource):
+ """OutboundRuleBasicResource.
Variables are only populated by the server, and will be ignored when sending a request.
@@ -15046,8 +23975,9 @@ class ModelContainer(Resource):
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
- :ivar properties: [Required] Additional attributes of the entity. Required.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.ModelContainerProperties
+ :ivar properties: Outbound Rule for the managed network of a machine learning workspace.
+ Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.OutboundRule
"""
_validation = {
@@ -15063,1994 +23993,2333 @@ class ModelContainer(Resource):
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
- "properties": {"key": "properties", "type": "ModelContainerProperties"},
+ "properties": {"key": "properties", "type": "OutboundRule"},
}
- def __init__(self, *, properties: "_models.ModelContainerProperties", **kwargs: Any) -> None:
+ def __init__(self, *, properties: "_models.OutboundRule", **kwargs: Any) -> None:
"""
- :keyword properties: [Required] Additional attributes of the entity. Required.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.ModelContainerProperties
+ :keyword properties: Outbound Rule for the managed network of a machine learning workspace.
+ Required.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.OutboundRule
"""
super().__init__(**kwargs)
self.properties = properties
-class ModelContainerProperties(AssetContainer):
- """ModelContainerProperties.
-
- Variables are only populated by the server, and will be ignored when sending a request.
+class OutboundRuleListResult(_serialization.Model):
+ """List of outbound rules for the managed network of a machine learning workspace.
- :ivar description: The asset description text.
- :vartype description: str
- :ivar properties: The asset property dictionary.
- :vartype properties: dict[str, str]
- :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
- :vartype tags: dict[str, str]
- :ivar is_archived: Is the asset archived?.
- :vartype is_archived: bool
- :ivar latest_version: The latest version inside this container.
- :vartype latest_version: str
- :ivar next_version: The next auto incremental version.
- :vartype next_version: str
- :ivar provisioning_state: Provisioning state for the model container. Known values are:
- "Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.AssetProvisioningState
+ :ivar next_link: The link to the next page constructed using the continuationToken. If null,
+ there are no additional pages.
+ :vartype next_link: str
+ :ivar value: The list of outbound rules. Since this list may be incomplete, the nextLink
+ field should be used to request the next list of outbound rules.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource]
"""
- _validation = {
- "latest_version": {"readonly": True},
- "next_version": {"readonly": True},
- "provisioning_state": {"readonly": True},
- }
-
_attribute_map = {
- "description": {"key": "description", "type": "str"},
- "properties": {"key": "properties", "type": "{str}"},
- "tags": {"key": "tags", "type": "{str}"},
- "is_archived": {"key": "isArchived", "type": "bool"},
- "latest_version": {"key": "latestVersion", "type": "str"},
- "next_version": {"key": "nextVersion", "type": "str"},
- "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[OutboundRuleBasicResource]"},
}
def __init__(
self,
*,
- description: Optional[str] = None,
- properties: Optional[Dict[str, str]] = None,
- tags: Optional[Dict[str, str]] = None,
- is_archived: bool = False,
+ next_link: Optional[str] = None,
+ value: Optional[List["_models.OutboundRuleBasicResource"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword description: The asset description text.
- :paramtype description: str
- :keyword properties: The asset property dictionary.
- :paramtype properties: dict[str, str]
- :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
- :paramtype tags: dict[str, str]
- :keyword is_archived: Is the asset archived?.
- :paramtype is_archived: bool
+ :keyword next_link: The link to the next page constructed using the continuationToken. If
+ null, there are no additional pages.
+ :paramtype next_link: str
+ :keyword value: The list of outbound rules. Since this list may be incomplete, the nextLink
+ field should be used to request the next list of outbound rules.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource]
"""
- super().__init__(description=description, properties=properties, tags=tags, is_archived=is_archived, **kwargs)
- self.provisioning_state = None
+ super().__init__(**kwargs)
+ self.next_link = next_link
+ self.value = value
-class ModelContainerResourceArmPaginatedResult(_serialization.Model):
- """A paginated list of ModelContainer entities.
+class OutputPathAssetReference(AssetReferenceBase):
+ """Reference to an asset via its path in a job output.
- :ivar next_link: The link to the next page of ModelContainer objects. If null, there are no
- additional pages.
- :vartype next_link: str
- :ivar value: An array of objects of type ModelContainer.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.ModelContainer]
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar reference_type: [Required] Specifies the type of asset reference. Required. Known values
+ are: "Id", "DataPath", and "OutputPath".
+ :vartype reference_type: str or ~azure.mgmt.machinelearningservices.models.ReferenceType
+ :ivar job_id: ARM resource ID of the job.
+ :vartype job_id: str
+ :ivar path: The path of the file/directory in the job output.
+ :vartype path: str
"""
+ _validation = {
+ "reference_type": {"required": True},
+ }
+
_attribute_map = {
- "next_link": {"key": "nextLink", "type": "str"},
- "value": {"key": "value", "type": "[ModelContainer]"},
+ "reference_type": {"key": "referenceType", "type": "str"},
+ "job_id": {"key": "jobId", "type": "str"},
+ "path": {"key": "path", "type": "str"},
}
- def __init__(
- self, *, next_link: Optional[str] = None, value: Optional[List["_models.ModelContainer"]] = None, **kwargs: Any
- ) -> None:
+ def __init__(self, *, job_id: Optional[str] = None, path: Optional[str] = None, **kwargs: Any) -> None:
"""
- :keyword next_link: The link to the next page of ModelContainer objects. If null, there are no
- additional pages.
- :paramtype next_link: str
- :keyword value: An array of objects of type ModelContainer.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.ModelContainer]
+ :keyword job_id: ARM resource ID of the job.
+ :paramtype job_id: str
+ :keyword path: The path of the file/directory in the job output.
+ :paramtype path: str
"""
super().__init__(**kwargs)
- self.next_link = next_link
- self.value = value
+ self.reference_type: str = "OutputPath"
+ self.job_id = job_id
+ self.path = path
-class ModelVersion(Resource):
- """Azure Resource Manager resource envelope.
+class PackageInputPathBase(_serialization.Model):
+ """PackageInputPathBase.
- Variables are only populated by the server, and will be ignored when sending a request.
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ PackageInputPathId, PackageInputPathVersion, PackageInputPathUrl
All required parameters must be populated in order to send to Azure.
- :ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
- :vartype id: str
- :ivar name: The name of the resource.
- :vartype name: str
- :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
- "Microsoft.Storage/storageAccounts".
- :vartype type: str
- :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
- information.
- :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
- :ivar properties: [Required] Additional attributes of the entity. Required.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.ModelVersionProperties
+ :ivar input_path_type: [Required] Input path type for package inputs. Required. Known values
+ are: "Url", "PathId", and "PathVersion".
+ :vartype input_path_type: str or ~azure.mgmt.machinelearningservices.models.InputPathType
"""
_validation = {
- "id": {"readonly": True},
- "name": {"readonly": True},
- "type": {"readonly": True},
- "system_data": {"readonly": True},
- "properties": {"required": True},
+ "input_path_type": {"required": True},
}
_attribute_map = {
- "id": {"key": "id", "type": "str"},
- "name": {"key": "name", "type": "str"},
- "type": {"key": "type", "type": "str"},
- "system_data": {"key": "systemData", "type": "SystemData"},
- "properties": {"key": "properties", "type": "ModelVersionProperties"},
+ "input_path_type": {"key": "inputPathType", "type": "str"},
}
- def __init__(self, *, properties: "_models.ModelVersionProperties", **kwargs: Any) -> None:
+ _subtype_map = {
+ "input_path_type": {
+ "PathId": "PackageInputPathId",
+ "PathVersion": "PackageInputPathVersion",
+ "Url": "PackageInputPathUrl",
+ }
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.input_path_type: Optional[str] = None
+
+
+class PackageInputPathId(PackageInputPathBase):
+ """Package input path specified with a resource id.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar input_path_type: [Required] Input path type for package inputs. Required. Known values
+ are: "Url", "PathId", and "PathVersion".
+ :vartype input_path_type: str or ~azure.mgmt.machinelearningservices.models.InputPathType
+ :ivar resource_id: Input resource id.
+ :vartype resource_id: str
+ """
+
+ _validation = {
+ "input_path_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "input_path_type": {"key": "inputPathType", "type": "str"},
+ "resource_id": {"key": "resourceId", "type": "str"},
+ }
+
+ def __init__(self, *, resource_id: Optional[str] = None, **kwargs: Any) -> None:
"""
- :keyword properties: [Required] Additional attributes of the entity. Required.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.ModelVersionProperties
+ :keyword resource_id: Input resource id.
+ :paramtype resource_id: str
"""
super().__init__(**kwargs)
- self.properties = properties
+ self.input_path_type: str = "PathId"
+ self.resource_id = resource_id
+
+
+class PackageInputPathUrl(PackageInputPathBase):
+ """Package input path specified as an url.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar input_path_type: [Required] Input path type for package inputs. Required. Known values
+ are: "Url", "PathId", and "PathVersion".
+ :vartype input_path_type: str or ~azure.mgmt.machinelearningservices.models.InputPathType
+ :ivar url: Input path url.
+ :vartype url: str
+ """
+
+ _validation = {
+ "input_path_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "input_path_type": {"key": "inputPathType", "type": "str"},
+ "url": {"key": "url", "type": "str"},
+ }
+
+ def __init__(self, *, url: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword url: Input path url.
+ :paramtype url: str
+ """
+ super().__init__(**kwargs)
+ self.input_path_type: str = "Url"
+ self.url = url
+
+
+class PackageInputPathVersion(PackageInputPathBase):
+ """Package input path specified with name and version.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar input_path_type: [Required] Input path type for package inputs. Required. Known values
+ are: "Url", "PathId", and "PathVersion".
+ :vartype input_path_type: str or ~azure.mgmt.machinelearningservices.models.InputPathType
+ :ivar resource_name: Input resource name.
+ :vartype resource_name: str
+ :ivar resource_version: Input resource version.
+ :vartype resource_version: str
+ """
+
+ _validation = {
+ "input_path_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "input_path_type": {"key": "inputPathType", "type": "str"},
+ "resource_name": {"key": "resourceName", "type": "str"},
+ "resource_version": {"key": "resourceVersion", "type": "str"},
+ }
+
+ def __init__(
+ self, *, resource_name: Optional[str] = None, resource_version: Optional[str] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword resource_name: Input resource name.
+ :paramtype resource_name: str
+ :keyword resource_version: Input resource version.
+ :paramtype resource_version: str
+ """
+ super().__init__(**kwargs)
+ self.input_path_type: str = "PathVersion"
+ self.resource_name = resource_name
+ self.resource_version = resource_version
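The three PackageInputPathBase subclasses above differ only in the inputPathType discriminator, which each constructor sets for you. A short sketch of choosing one per input, using only the keywords shown in this diff (all identifiers are placeholders):

```python
from azure.mgmt.machinelearningservices import models

# By ARM resource id.
by_id = models.PackageInputPathId(resource_id="<arm-resource-id-of-the-asset>")

# By name and version.
by_version = models.PackageInputPathVersion(resource_name="my-model", resource_version="1")

# By URL.
by_url = models.PackageInputPathUrl(url="https://example.com/asset")

# The discriminator is set automatically by each subclass.
assert (by_id.input_path_type, by_version.input_path_type, by_url.input_path_type) == (
    "PathId",
    "PathVersion",
    "Url",
)
```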
-class ModelVersionProperties(AssetBase): # pylint: disable=too-many-instance-attributes
- """Model asset version details.
+class PackageRequest(_serialization.Model):
+ """Model package operation request properties.
- Variables are only populated by the server, and will be ignored when sending a request.
+ All required parameters must be populated in order to send to Azure.
- :ivar description: The asset description text.
- :vartype description: str
- :ivar properties: The asset property dictionary.
+ :ivar base_environment_source: Base environment to start with.
+ :vartype base_environment_source:
+ ~azure.mgmt.machinelearningservices.models.BaseEnvironmentSource
+ :ivar environment_variables: Collection of environment variables.
+ :vartype environment_variables: dict[str, str]
+ :ivar inferencing_server: [Required] Inferencing server configurations. Required.
+ :vartype inferencing_server: ~azure.mgmt.machinelearningservices.models.InferencingServer
+ :ivar inputs: Collection of inputs.
+ :vartype inputs: list[~azure.mgmt.machinelearningservices.models.ModelPackageInput]
+ :ivar model_configuration: Model configuration including the mount mode.
+ :vartype model_configuration: ~azure.mgmt.machinelearningservices.models.ModelConfiguration
+ :ivar properties: Property dictionary. Properties can be added, removed, and updated.
:vartype properties: dict[str, str]
:ivar tags: Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
- :ivar is_anonymous: If the name version are system generated (anonymous registration).
- :vartype is_anonymous: bool
- :ivar is_archived: Is the asset archived?.
- :vartype is_archived: bool
- :ivar flavors: Mapping of model flavors to their properties.
- :vartype flavors: dict[str, ~azure.mgmt.machinelearningservices.models.FlavorData]
- :ivar job_name: Name of the training job which produced this model.
- :vartype job_name: str
- :ivar model_type: The storage format for this entity. Used for NCD.
- :vartype model_type: str
- :ivar model_uri: The URI path to the model contents.
- :vartype model_uri: str
- :ivar provisioning_state: Provisioning state for the model version. Known values are:
- "Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.AssetProvisioningState
- :ivar stage: Stage in the model lifecycle assigned to this model.
- :vartype stage: str
+ :ivar target_environment_id: [Required] ARM ID of the target environment to be created by the
+ package operation. Required.
+ :vartype target_environment_id: str
"""
_validation = {
- "provisioning_state": {"readonly": True},
+ "inferencing_server": {"required": True},
+ "target_environment_id": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
}
_attribute_map = {
- "description": {"key": "description", "type": "str"},
+ "base_environment_source": {"key": "baseEnvironmentSource", "type": "BaseEnvironmentSource"},
+ "environment_variables": {"key": "environmentVariables", "type": "{str}"},
+ "inferencing_server": {"key": "inferencingServer", "type": "InferencingServer"},
+ "inputs": {"key": "inputs", "type": "[ModelPackageInput]"},
+ "model_configuration": {"key": "modelConfiguration", "type": "ModelConfiguration"},
"properties": {"key": "properties", "type": "{str}"},
"tags": {"key": "tags", "type": "{str}"},
- "is_anonymous": {"key": "isAnonymous", "type": "bool"},
- "is_archived": {"key": "isArchived", "type": "bool"},
- "flavors": {"key": "flavors", "type": "{FlavorData}"},
- "job_name": {"key": "jobName", "type": "str"},
- "model_type": {"key": "modelType", "type": "str"},
- "model_uri": {"key": "modelUri", "type": "str"},
- "provisioning_state": {"key": "provisioningState", "type": "str"},
- "stage": {"key": "stage", "type": "str"},
+ "target_environment_id": {"key": "targetEnvironmentId", "type": "str"},
}
def __init__(
self,
*,
- description: Optional[str] = None,
+ inferencing_server: "_models.InferencingServer",
+ target_environment_id: str,
+ base_environment_source: Optional["_models.BaseEnvironmentSource"] = None,
+ environment_variables: Optional[Dict[str, str]] = None,
+ inputs: Optional[List["_models.ModelPackageInput"]] = None,
+ model_configuration: Optional["_models.ModelConfiguration"] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
- is_anonymous: bool = False,
- is_archived: bool = False,
- flavors: Optional[Dict[str, "_models.FlavorData"]] = None,
- job_name: Optional[str] = None,
- model_type: Optional[str] = None,
- model_uri: Optional[str] = None,
- stage: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword description: The asset description text.
- :paramtype description: str
- :keyword properties: The asset property dictionary.
+ :keyword base_environment_source: Base environment to start with.
+ :paramtype base_environment_source:
+ ~azure.mgmt.machinelearningservices.models.BaseEnvironmentSource
+ :keyword environment_variables: Collection of environment variables.
+ :paramtype environment_variables: dict[str, str]
+ :keyword inferencing_server: [Required] Inferencing server configurations. Required.
+ :paramtype inferencing_server: ~azure.mgmt.machinelearningservices.models.InferencingServer
+ :keyword inputs: Collection of inputs.
+ :paramtype inputs: list[~azure.mgmt.machinelearningservices.models.ModelPackageInput]
+ :keyword model_configuration: Model configuration including the mount mode.
+ :paramtype model_configuration: ~azure.mgmt.machinelearningservices.models.ModelConfiguration
+ :keyword properties: Property dictionary. Properties can be added, removed, and updated.
:paramtype properties: dict[str, str]
:keyword tags: Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
- :keyword is_anonymous: If the name version are system generated (anonymous registration).
- :paramtype is_anonymous: bool
- :keyword is_archived: Is the asset archived?.
- :paramtype is_archived: bool
- :keyword flavors: Mapping of model flavors to their properties.
- :paramtype flavors: dict[str, ~azure.mgmt.machinelearningservices.models.FlavorData]
- :keyword job_name: Name of the training job which produced this model.
- :paramtype job_name: str
- :keyword model_type: The storage format for this entity. Used for NCD.
- :paramtype model_type: str
- :keyword model_uri: The URI path to the model contents.
- :paramtype model_uri: str
- :keyword stage: Stage in the model lifecycle assigned to this model.
- :paramtype stage: str
+ :keyword target_environment_id: [Required] ARM ID of the target environment to be created by
+ the package operation. Required.
+ :paramtype target_environment_id: str
"""
- super().__init__(
- description=description,
- properties=properties,
- tags=tags,
- is_anonymous=is_anonymous,
- is_archived=is_archived,
- **kwargs
- )
- self.flavors = flavors
- self.job_name = job_name
- self.model_type = model_type
- self.model_uri = model_uri
- self.provisioning_state = None
- self.stage = stage
+ super().__init__(**kwargs)
+ self.base_environment_source = base_environment_source
+ self.environment_variables = environment_variables
+ self.inferencing_server = inferencing_server
+ self.inputs = inputs
+ self.model_configuration = model_configuration
+ self.properties = properties
+ self.tags = tags
+ self.target_environment_id = target_environment_id
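PackageRequest has two required pieces: an InferencingServer and the target environment's ARM ID. A minimal sketch of the request body; the AzureMLOnlineInferencingServer subtype used for the polymorphic inferencing_server field is an assumption, since its definition is not part of this excerpt, and the IDs are placeholders:

```python
from azure.mgmt.machinelearningservices import models

package_request = models.PackageRequest(
    # The concrete InferencingServer subtype is assumed here.
    inferencing_server=models.AzureMLOnlineInferencingServer(),
    target_environment_id="<arm-id-of-target-environment>",  # required
    environment_variables={"LOG_LEVEL": "info"},
    tags={"purpose": "packaging-example"},
)
```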
-class ModelVersionResourceArmPaginatedResult(_serialization.Model):
- """A paginated list of ModelVersion entities.
+class PackageResponse(_serialization.Model): # pylint: disable=too-many-instance-attributes
+ """Package response returned after async package operation completes successfully.
- :ivar next_link: The link to the next page of ModelVersion objects. If null, there are no
- additional pages.
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar base_environment_source: Base environment to start with.
+ :vartype base_environment_source:
+ ~azure.mgmt.machinelearningservices.models.BaseEnvironmentSource
+ :ivar build_id: Build id of the image build operation.
+ :vartype build_id: str
+ :ivar build_state: Build state of the image build operation. Known values are: "NotStarted",
+ "Running", "Succeeded", and "Failed".
+ :vartype build_state: str or ~azure.mgmt.machinelearningservices.models.PackageBuildState
+ :ivar environment_variables: Collection of environment variables.
+ :vartype environment_variables: dict[str, str]
+ :ivar inferencing_server: Inferencing server configurations.
+ :vartype inferencing_server: ~azure.mgmt.machinelearningservices.models.InferencingServer
+ :ivar inputs: Collection of inputs.
+ :vartype inputs: list[~azure.mgmt.machinelearningservices.models.ModelPackageInput]
+ :ivar log_url: Log url of the image build operation.
+ :vartype log_url: str
+ :ivar model_configuration: Model configuration including the mount mode.
+ :vartype model_configuration: ~azure.mgmt.machinelearningservices.models.ModelConfiguration
+ :ivar properties: Property dictionary. Properties can be added, removed, and updated.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar target_environment_id: Asset ID of the target environment created by the package
+ operation.
+ :vartype target_environment_id: str
+ """
+
+ _validation = {
+ "base_environment_source": {"readonly": True},
+ "build_id": {"readonly": True},
+ "build_state": {"readonly": True},
+ "environment_variables": {"readonly": True},
+ "inferencing_server": {"readonly": True},
+ "inputs": {"readonly": True},
+ "log_url": {"readonly": True},
+ "model_configuration": {"readonly": True},
+ "properties": {"readonly": True},
+ "tags": {"readonly": True},
+ "target_environment_id": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "base_environment_source": {"key": "baseEnvironmentSource", "type": "BaseEnvironmentSource"},
+ "build_id": {"key": "buildId", "type": "str"},
+ "build_state": {"key": "buildState", "type": "str"},
+ "environment_variables": {"key": "environmentVariables", "type": "{str}"},
+ "inferencing_server": {"key": "inferencingServer", "type": "InferencingServer"},
+ "inputs": {"key": "inputs", "type": "[ModelPackageInput]"},
+ "log_url": {"key": "logUrl", "type": "str"},
+ "model_configuration": {"key": "modelConfiguration", "type": "ModelConfiguration"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "target_environment_id": {"key": "targetEnvironmentId", "type": "str"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.base_environment_source = None
+ self.build_id = None
+ self.build_state = None
+ self.environment_variables = None
+ self.inferencing_server = None
+ self.inputs = None
+ self.log_url = None
+ self.model_configuration = None
+ self.properties = None
+ self.tags = None
+ self.target_environment_id = None
+
+
+class PaginatedComputeResourcesList(_serialization.Model):
+ """Paginated list of Machine Learning compute objects wrapped in ARM resource envelope.
+
+ :ivar value: An array of Machine Learning compute objects wrapped in ARM resource envelope.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.ComputeResource]
+ :ivar next_link: A continuation link (absolute URI) to the next page of results in the list.
:vartype next_link: str
- :ivar value: An array of objects of type ModelVersion.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.ModelVersion]
"""
_attribute_map = {
+ "value": {"key": "value", "type": "[ComputeResource]"},
"next_link": {"key": "nextLink", "type": "str"},
- "value": {"key": "value", "type": "[ModelVersion]"},
}
def __init__(
- self, *, next_link: Optional[str] = None, value: Optional[List["_models.ModelVersion"]] = None, **kwargs: Any
+ self, *, value: Optional[List["_models.ComputeResource"]] = None, next_link: Optional[str] = None, **kwargs: Any
) -> None:
"""
- :keyword next_link: The link to the next page of ModelVersion objects. If null, there are no
- additional pages.
+ :keyword value: An array of Machine Learning compute objects wrapped in ARM resource envelope.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.ComputeResource]
+ :keyword next_link: A continuation link (absolute URI) to the next page of results in the list.
:paramtype next_link: str
- :keyword value: An array of objects of type ModelVersion.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.ModelVersion]
"""
super().__init__(**kwargs)
- self.next_link = next_link
self.value = value
+ self.next_link = next_link
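# Editorial sketch (not part of the generated diff): callers normally never build
# PaginatedComputeResourcesList by hand; the operations layer returns an ItemPaged
# iterator that follows next_link automatically. Assuming the management client exposes
# a `compute.list` operation (not shown in this hunk), consumption looks roughly like:
#
#     for compute_resource in ml_client.compute.list(
#         resource_group_name="<resource-group>", workspace_name="<workspace>"
#     ):
#         print(compute_resource.name)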
-class Mpi(DistributionConfiguration):
- """MPI distribution configuration.
-
- All required parameters must be populated in order to send to Azure.
+class PartialBatchDeployment(_serialization.Model):
+ """Mutable batch inference settings per deployment.
- :ivar distribution_type: [Required] Specifies the type of distribution framework. Required.
- Known values are: "PyTorch", "TensorFlow", and "Mpi".
- :vartype distribution_type: str or ~azure.mgmt.machinelearningservices.models.DistributionType
- :ivar process_count_per_instance: Number of processes per MPI node.
- :vartype process_count_per_instance: int
+ :ivar description: Description of the endpoint deployment.
+ :vartype description: str
"""
- _validation = {
- "distribution_type": {"required": True},
- }
-
_attribute_map = {
- "distribution_type": {"key": "distributionType", "type": "str"},
- "process_count_per_instance": {"key": "processCountPerInstance", "type": "int"},
+ "description": {"key": "description", "type": "str"},
}
- def __init__(self, *, process_count_per_instance: Optional[int] = None, **kwargs: Any) -> None:
+ def __init__(self, *, description: Optional[str] = None, **kwargs: Any) -> None:
"""
- :keyword process_count_per_instance: Number of processes per MPI node.
- :paramtype process_count_per_instance: int
+ :keyword description: Description of the endpoint deployment.
+ :paramtype description: str
"""
super().__init__(**kwargs)
- self.distribution_type: str = "Mpi"
- self.process_count_per_instance = process_count_per_instance
+ self.description = description
-class NlpVertical(_serialization.Model):
- """Abstract class for NLP related AutoML tasks.
- NLP - Natural Language Processing.
+class PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties(_serialization.Model):
+ """Strictly used in update requests.
- :ivar featurization_settings: Featurization inputs needed for AutoML job.
- :vartype featurization_settings:
- ~azure.mgmt.machinelearningservices.models.NlpVerticalFeaturizationSettings
- :ivar limit_settings: Execution constraints for AutoMLJob.
- :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalLimitSettings
- :ivar validation_data: Validation data inputs.
- :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar properties: Additional attributes of the entity.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.PartialBatchDeployment
+ :ivar tags: Resource tags.
+ :vartype tags: dict[str, str]
"""
_attribute_map = {
- "featurization_settings": {"key": "featurizationSettings", "type": "NlpVerticalFeaturizationSettings"},
- "limit_settings": {"key": "limitSettings", "type": "NlpVerticalLimitSettings"},
- "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
+ "properties": {"key": "properties", "type": "PartialBatchDeployment"},
+ "tags": {"key": "tags", "type": "{str}"},
}
def __init__(
self,
*,
- featurization_settings: Optional["_models.NlpVerticalFeaturizationSettings"] = None,
- limit_settings: Optional["_models.NlpVerticalLimitSettings"] = None,
- validation_data: Optional["_models.MLTableJobInput"] = None,
+ properties: Optional["_models.PartialBatchDeployment"] = None,
+ tags: Optional[Dict[str, str]] = None,
**kwargs: Any
) -> None:
"""
- :keyword featurization_settings: Featurization inputs needed for AutoML job.
- :paramtype featurization_settings:
- ~azure.mgmt.machinelearningservices.models.NlpVerticalFeaturizationSettings
- :keyword limit_settings: Execution constraints for AutoMLJob.
- :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalLimitSettings
- :keyword validation_data: Validation data inputs.
- :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword properties: Additional attributes of the entity.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.PartialBatchDeployment
+ :keyword tags: Resource tags.
+ :paramtype tags: dict[str, str]
"""
super().__init__(**kwargs)
- self.featurization_settings = featurization_settings
- self.limit_settings = limit_settings
- self.validation_data = validation_data
+ self.properties = properties
+ self.tags = tags
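# Editorial sketch (not part of the generated diff): the two "partial" models above are
# intended for PATCH-style updates where only mutable fields are sent. A minimal update
# body, with illustrative description/tag values, could look like this:
from azure.mgmt.machinelearningservices.models import (
    PartialBatchDeployment,
    PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties,
)

batch_deployment_update = PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties(
    properties=PartialBatchDeployment(description="Nightly batch scoring deployment"),
    tags={"costCenter": "ml-platform"},  # placeholder tag for illustration
)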
-class NlpVerticalFeaturizationSettings(FeaturizationSettings):
- """NlpVerticalFeaturizationSettings.
+class PartialJobBase(_serialization.Model):
+ """Mutable base definition for a job.
- :ivar dataset_language: Dataset language, useful for the text data.
- :vartype dataset_language: str
+ :ivar notification_setting: Mutable notification setting for the job.
+ :vartype notification_setting:
+ ~azure.mgmt.machinelearningservices.models.PartialNotificationSetting
"""
_attribute_map = {
- "dataset_language": {"key": "datasetLanguage", "type": "str"},
+ "notification_setting": {"key": "notificationSetting", "type": "PartialNotificationSetting"},
}
- def __init__(self, *, dataset_language: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(
+ self, *, notification_setting: Optional["_models.PartialNotificationSetting"] = None, **kwargs: Any
+ ) -> None:
"""
- :keyword dataset_language: Dataset language, useful for the text data.
- :paramtype dataset_language: str
+ :keyword notification_setting: Mutable notification setting for the job.
+ :paramtype notification_setting:
+ ~azure.mgmt.machinelearningservices.models.PartialNotificationSetting
"""
- super().__init__(dataset_language=dataset_language, **kwargs)
+ super().__init__(**kwargs)
+ self.notification_setting = notification_setting
-class NlpVerticalLimitSettings(_serialization.Model):
- """Job execution constraints.
+class PartialJobBasePartialResource(_serialization.Model):
+ """Azure Resource Manager resource envelope strictly used in update requests.
- :ivar max_concurrent_trials: Maximum Concurrent AutoML iterations.
- :vartype max_concurrent_trials: int
- :ivar max_trials: Number of AutoML iterations.
- :vartype max_trials: int
- :ivar timeout: AutoML job timeout.
- :vartype timeout: ~datetime.timedelta
+ :ivar properties: Additional attributes of the entity.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.PartialJobBase
"""
_attribute_map = {
- "max_concurrent_trials": {"key": "maxConcurrentTrials", "type": "int"},
- "max_trials": {"key": "maxTrials", "type": "int"},
- "timeout": {"key": "timeout", "type": "duration"},
+ "properties": {"key": "properties", "type": "PartialJobBase"},
}
- def __init__(
- self, *, max_concurrent_trials: int = 1, max_trials: int = 1, timeout: datetime.timedelta = "P7D", **kwargs: Any
- ) -> None:
+ def __init__(self, *, properties: Optional["_models.PartialJobBase"] = None, **kwargs: Any) -> None:
"""
- :keyword max_concurrent_trials: Maximum Concurrent AutoML iterations.
- :paramtype max_concurrent_trials: int
- :keyword max_trials: Number of AutoML iterations.
- :paramtype max_trials: int
- :keyword timeout: AutoML job timeout.
- :paramtype timeout: ~datetime.timedelta
+ :keyword properties: Additional attributes of the entity.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.PartialJobBase
+ """
+ super().__init__(**kwargs)
+ self.properties = properties
+
+
+class PartialManagedServiceIdentity(_serialization.Model):
+ """Managed service identity (system assigned and/or user assigned identities).
+
+ :ivar type: Managed service identity (system assigned and/or user assigned identities). Known
+ values are: "None", "SystemAssigned", "UserAssigned", and "SystemAssigned,UserAssigned".
+ :vartype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
+ :ivar user_assigned_identities: The set of user assigned identities associated with the
+ resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
+ '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
+ The dictionary values can be empty objects ({}) in requests.
+ :vartype user_assigned_identities: dict[str, JSON]
+ """
+
+ _attribute_map = {
+ "type": {"key": "type", "type": "str"},
+ "user_assigned_identities": {"key": "userAssignedIdentities", "type": "{object}"},
+ }
+
+ def __init__(
+ self,
+ *,
+ type: Optional[Union[str, "_models.ManagedServiceIdentityType"]] = None,
+ user_assigned_identities: Optional[Dict[str, JSON]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword type: Managed service identity (system assigned and/or user assigned identities).
+ Known values are: "None", "SystemAssigned", "UserAssigned", and "SystemAssigned,UserAssigned".
+ :paramtype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
+ :keyword user_assigned_identities: The set of user assigned identities associated with the
+ resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
+ '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
+ The dictionary values can be empty objects ({}) in requests.
+ :paramtype user_assigned_identities: dict[str, JSON]
"""
super().__init__(**kwargs)
- self.max_concurrent_trials = max_concurrent_trials
- self.max_trials = max_trials
- self.timeout = timeout
-
+ self.type = type
+ self.user_assigned_identities = user_assigned_identities
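# Editorial sketch (not part of the generated diff): PartialManagedServiceIdentity in an
# update request. The user-assigned identity resource ID below is a placeholder; as the
# docstring notes, dictionary values may be empty objects ({}) in requests.
from azure.mgmt.machinelearningservices.models import PartialManagedServiceIdentity

partial_identity = PartialManagedServiceIdentity(
    type="SystemAssigned,UserAssigned",
    user_assigned_identities={
        "/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/"
        "Microsoft.ManagedIdentity/userAssignedIdentities/<identity-name>": {}
    },
)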
-class NodeStateCounts(_serialization.Model):
- """Counts of various compute node states on the amlCompute.
- Variables are only populated by the server, and will be ignored when sending a request.
+class PartialMinimalTrackedResource(_serialization.Model):
+ """Strictly used in update requests.
- :ivar idle_node_count: Number of compute nodes in idle state.
- :vartype idle_node_count: int
- :ivar running_node_count: Number of compute nodes which are running jobs.
- :vartype running_node_count: int
- :ivar preparing_node_count: Number of compute nodes which are being prepared.
- :vartype preparing_node_count: int
- :ivar unusable_node_count: Number of compute nodes which are in unusable state.
- :vartype unusable_node_count: int
- :ivar leaving_node_count: Number of compute nodes which are leaving the amlCompute.
- :vartype leaving_node_count: int
- :ivar preempted_node_count: Number of compute nodes which are in preempted state.
- :vartype preempted_node_count: int
+ :ivar tags: Resource tags.
+ :vartype tags: dict[str, str]
"""
- _validation = {
- "idle_node_count": {"readonly": True},
- "running_node_count": {"readonly": True},
- "preparing_node_count": {"readonly": True},
- "unusable_node_count": {"readonly": True},
- "leaving_node_count": {"readonly": True},
- "preempted_node_count": {"readonly": True},
- }
-
_attribute_map = {
- "idle_node_count": {"key": "idleNodeCount", "type": "int"},
- "running_node_count": {"key": "runningNodeCount", "type": "int"},
- "preparing_node_count": {"key": "preparingNodeCount", "type": "int"},
- "unusable_node_count": {"key": "unusableNodeCount", "type": "int"},
- "leaving_node_count": {"key": "leavingNodeCount", "type": "int"},
- "preempted_node_count": {"key": "preemptedNodeCount", "type": "int"},
+ "tags": {"key": "tags", "type": "{str}"},
}
- def __init__(self, **kwargs: Any) -> None:
- """ """
+ def __init__(self, *, tags: Optional[Dict[str, str]] = None, **kwargs: Any) -> None:
+ """
+ :keyword tags: Resource tags.
+ :paramtype tags: dict[str, str]
+ """
super().__init__(**kwargs)
- self.idle_node_count = None
- self.running_node_count = None
- self.preparing_node_count = None
- self.unusable_node_count = None
- self.leaving_node_count = None
- self.preempted_node_count = None
-
+ self.tags = tags
-class NoneAuthTypeWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2):
- """NoneAuthTypeWorkspaceConnectionProperties.
- All required parameters must be populated in order to send to Azure.
+class PartialMinimalTrackedResourceWithIdentity(PartialMinimalTrackedResource):
+ """Strictly used in update requests.
- :ivar auth_type: Authentication type of the connection target. Required. Known values are:
- "PAT", "ManagedIdentity", "UsernamePassword", "None", and "SAS".
- :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
- :ivar category: Category of the connection. Known values are: "PythonFeed",
- "ContainerRegistry", and "Git".
- :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
- :ivar target:
- :vartype target: str
- :ivar value: Value details of the workspace connection.
- :vartype value: str
- :ivar value_format: format for the workspace connection value. "JSON"
- :vartype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ :ivar tags: Resource tags.
+ :vartype tags: dict[str, str]
+ :ivar identity: Managed service identity (system assigned and/or user assigned identities).
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.PartialManagedServiceIdentity
"""
- _validation = {
- "auth_type": {"required": True},
- }
-
_attribute_map = {
- "auth_type": {"key": "authType", "type": "str"},
- "category": {"key": "category", "type": "str"},
- "target": {"key": "target", "type": "str"},
- "value": {"key": "value", "type": "str"},
- "value_format": {"key": "valueFormat", "type": "str"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "identity": {"key": "identity", "type": "PartialManagedServiceIdentity"},
}
def __init__(
self,
*,
- category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
- target: Optional[str] = None,
- value: Optional[str] = None,
- value_format: Optional[Union[str, "_models.ValueFormat"]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ identity: Optional["_models.PartialManagedServiceIdentity"] = None,
**kwargs: Any
) -> None:
"""
- :keyword category: Category of the connection. Known values are: "PythonFeed",
- "ContainerRegistry", and "Git".
- :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
- :keyword target:
- :paramtype target: str
- :keyword value: Value details of the workspace connection.
- :paramtype value: str
- :keyword value_format: format for the workspace connection value. "JSON"
- :paramtype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ :keyword tags: Resource tags.
+ :paramtype tags: dict[str, str]
+ :keyword identity: Managed service identity (system assigned and/or user assigned identities).
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.PartialManagedServiceIdentity
"""
- super().__init__(category=category, target=target, value=value, value_format=value_format, **kwargs)
- self.auth_type: str = "None"
-
+ super().__init__(tags=tags, **kwargs)
+ self.identity = identity
-class NoneDatastoreCredentials(DatastoreCredentials):
- """Empty/none datastore credentials.
- All required parameters must be populated in order to send to Azure.
+class PartialMinimalTrackedResourceWithSku(PartialMinimalTrackedResource):
+ """Strictly used in update requests.
- :ivar credentials_type: [Required] Credential type used to authentication with storage.
- Required. Known values are: "AccountKey", "Certificate", "None", "Sas", and "ServicePrincipal".
- :vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
+ :ivar tags: Resource tags.
+ :vartype tags: dict[str, str]
+ :ivar sku: Sku details required for ARM contract for Autoscaling.
+ :vartype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
"""
- _validation = {
- "credentials_type": {"required": True},
- }
-
_attribute_map = {
- "credentials_type": {"key": "credentialsType", "type": "str"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "sku": {"key": "sku", "type": "PartialSku"},
}
- def __init__(self, **kwargs: Any) -> None:
- """ """
- super().__init__(**kwargs)
- self.credentials_type: str = "None"
-
+ def __init__(
+ self, *, tags: Optional[Dict[str, str]] = None, sku: Optional["_models.PartialSku"] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword tags: Resource tags.
+ :paramtype tags: dict[str, str]
+ :keyword sku: Sku details required for ARM contract for Autoscaling.
+ :paramtype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
+ """
+ super().__init__(tags=tags, **kwargs)
+ self.sku = sku
-class NotebookAccessTokenResult(_serialization.Model):
- """NotebookAccessTokenResult.
- Variables are only populated by the server, and will be ignored when sending a request.
+class PartialMinimalTrackedResourceWithSkuAndIdentity(PartialMinimalTrackedResource):
+ """Strictly used in update requests.
- :ivar notebook_resource_id:
- :vartype notebook_resource_id: str
- :ivar host_name:
- :vartype host_name: str
- :ivar public_dns:
- :vartype public_dns: str
- :ivar access_token:
- :vartype access_token: str
- :ivar token_type:
- :vartype token_type: str
- :ivar expires_in:
- :vartype expires_in: int
- :ivar refresh_token:
- :vartype refresh_token: str
- :ivar scope:
- :vartype scope: str
+ :ivar tags: Resource tags.
+ :vartype tags: dict[str, str]
+ :ivar identity: Managed service identity (system assigned and/or user assigned identities).
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.PartialManagedServiceIdentity
+ :ivar sku: Sku details required for ARM contract for Autoscaling.
+ :vartype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
"""
- _validation = {
- "notebook_resource_id": {"readonly": True},
- "host_name": {"readonly": True},
- "public_dns": {"readonly": True},
- "access_token": {"readonly": True},
- "token_type": {"readonly": True},
- "expires_in": {"readonly": True},
- "refresh_token": {"readonly": True},
- "scope": {"readonly": True},
+ _attribute_map = {
+ "tags": {"key": "tags", "type": "{str}"},
+ "identity": {"key": "identity", "type": "PartialManagedServiceIdentity"},
+ "sku": {"key": "sku", "type": "PartialSku"},
}
+ def __init__(
+ self,
+ *,
+ tags: Optional[Dict[str, str]] = None,
+ identity: Optional["_models.PartialManagedServiceIdentity"] = None,
+ sku: Optional["_models.PartialSku"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword tags: Resource tags.
+ :paramtype tags: dict[str, str]
+ :keyword identity: Managed service identity (system assigned and/or user assigned identities).
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.PartialManagedServiceIdentity
+ :keyword sku: Sku details required for ARM contract for Autoscaling.
+ :paramtype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
+ """
+ super().__init__(tags=tags, **kwargs)
+ self.identity = identity
+ self.sku = sku
+
+
+class PartialNotificationSetting(_serialization.Model):
+ """Mutable configuration for notification.
+
+ :ivar webhooks: Send webhook callback to a service. Key is a user-provided name for the
+ webhook.
+ :vartype webhooks: dict[str, ~azure.mgmt.machinelearningservices.models.Webhook]
+ """
+
_attribute_map = {
- "notebook_resource_id": {"key": "notebookResourceId", "type": "str"},
- "host_name": {"key": "hostName", "type": "str"},
- "public_dns": {"key": "publicDns", "type": "str"},
- "access_token": {"key": "accessToken", "type": "str"},
- "token_type": {"key": "tokenType", "type": "str"},
- "expires_in": {"key": "expiresIn", "type": "int"},
- "refresh_token": {"key": "refreshToken", "type": "str"},
- "scope": {"key": "scope", "type": "str"},
+ "webhooks": {"key": "webhooks", "type": "{Webhook}"},
}
- def __init__(self, **kwargs: Any) -> None:
- """ """
+ def __init__(self, *, webhooks: Optional[Dict[str, "_models.Webhook"]] = None, **kwargs: Any) -> None:
+ """
+ :keyword webhooks: Send webhook callback to a service. Key is a user-provided name for the
+ webhook.
+ :paramtype webhooks: dict[str, ~azure.mgmt.machinelearningservices.models.Webhook]
+ """
super().__init__(**kwargs)
- self.notebook_resource_id = None
- self.host_name = None
- self.public_dns = None
- self.access_token = None
- self.token_type = None
- self.expires_in = None
- self.refresh_token = None
- self.scope = None
+ self.webhooks = webhooks
-class NotebookPreparationError(_serialization.Model):
- """NotebookPreparationError.
+class PartialRegistryPartialTrackedResource(_serialization.Model):
+ """Strictly used in update requests.
- :ivar error_message:
- :vartype error_message: str
- :ivar status_code:
- :vartype status_code: int
+ :ivar identity: Managed service identity (system assigned and/or user assigned identities).
+ :vartype identity:
+ ~azure.mgmt.machinelearningservices.models.RegistryPartialManagedServiceIdentity
+ :ivar sku: Sku details required for ARM contract for Autoscaling.
+ :vartype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
+ :ivar tags: Resource tags.
+ :vartype tags: dict[str, str]
"""
_attribute_map = {
- "error_message": {"key": "errorMessage", "type": "str"},
- "status_code": {"key": "statusCode", "type": "int"},
+ "identity": {"key": "identity", "type": "RegistryPartialManagedServiceIdentity"},
+ "sku": {"key": "sku", "type": "PartialSku"},
+ "tags": {"key": "tags", "type": "{str}"},
}
def __init__(
- self, *, error_message: Optional[str] = None, status_code: Optional[int] = None, **kwargs: Any
+ self,
+ *,
+ identity: Optional["_models.RegistryPartialManagedServiceIdentity"] = None,
+ sku: Optional["_models.PartialSku"] = None,
+ tags: Optional[Dict[str, str]] = None,
+ **kwargs: Any
) -> None:
"""
- :keyword error_message:
- :paramtype error_message: str
- :keyword status_code:
- :paramtype status_code: int
+ :keyword identity: Managed service identity (system assigned and/or user assigned identities).
+ :paramtype identity:
+ ~azure.mgmt.machinelearningservices.models.RegistryPartialManagedServiceIdentity
+ :keyword sku: Sku details required for ARM contract for Autoscaling.
+ :paramtype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
+ :keyword tags: Resource tags.
+ :paramtype tags: dict[str, str]
"""
super().__init__(**kwargs)
- self.error_message = error_message
- self.status_code = status_code
+ self.identity = identity
+ self.sku = sku
+ self.tags = tags
-class NotebookResourceInfo(_serialization.Model):
- """NotebookResourceInfo.
+class PartialSku(_serialization.Model):
+ """Common SKU definition.
- :ivar fqdn:
- :vartype fqdn: str
- :ivar resource_id: the data plane resourceId that used to initialize notebook component.
- :vartype resource_id: str
- :ivar notebook_preparation_error: The error that occurs when preparing notebook.
- :vartype notebook_preparation_error:
- ~azure.mgmt.machinelearningservices.models.NotebookPreparationError
+ :ivar capacity: If the SKU supports scale out/in then the capacity integer should be included.
+ If scale out/in is not possible for the resource this may be omitted.
+ :vartype capacity: int
+ :ivar family: If the service has different generations of hardware, for the same SKU, then that
+ can be captured here.
+ :vartype family: str
+ :ivar name: The name of the SKU. Ex - P3. It is typically a letter+number code.
+ :vartype name: str
+ :ivar size: The SKU size. When the name field is the combination of tier and some other value,
+ this would be the standalone code.
+ :vartype size: str
+ :ivar tier: This field is required to be implemented by the Resource Provider if the service
+ has more than one tier, but is not required on a PUT. Known values are: "Free", "Basic",
+ "Standard", and "Premium".
+ :vartype tier: str or ~azure.mgmt.machinelearningservices.models.SkuTier
"""
_attribute_map = {
- "fqdn": {"key": "fqdn", "type": "str"},
- "resource_id": {"key": "resourceId", "type": "str"},
- "notebook_preparation_error": {"key": "notebookPreparationError", "type": "NotebookPreparationError"},
+ "capacity": {"key": "capacity", "type": "int"},
+ "family": {"key": "family", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "size": {"key": "size", "type": "str"},
+ "tier": {"key": "tier", "type": "str"},
}
def __init__(
self,
*,
- fqdn: Optional[str] = None,
- resource_id: Optional[str] = None,
- notebook_preparation_error: Optional["_models.NotebookPreparationError"] = None,
+ capacity: Optional[int] = None,
+ family: Optional[str] = None,
+ name: Optional[str] = None,
+ size: Optional[str] = None,
+ tier: Optional[Union[str, "_models.SkuTier"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword fqdn:
- :paramtype fqdn: str
- :keyword resource_id: the data plane resourceId that used to initialize notebook component.
- :paramtype resource_id: str
- :keyword notebook_preparation_error: The error that occurs when preparing notebook.
- :paramtype notebook_preparation_error:
- ~azure.mgmt.machinelearningservices.models.NotebookPreparationError
+ :keyword capacity: If the SKU supports scale out/in then the capacity integer should be
+ included. If scale out/in is not possible for the resource this may be omitted.
+ :paramtype capacity: int
+ :keyword family: If the service has different generations of hardware, for the same SKU, then
+ that can be captured here.
+ :paramtype family: str
+ :keyword name: The name of the SKU. Ex - P3. It is typically a letter+number code.
+ :paramtype name: str
+ :keyword size: The SKU size. When the name field is the combination of tier and some other
+ value, this would be the standalone code.
+ :paramtype size: str
+ :keyword tier: This field is required to be implemented by the Resource Provider if the service
+ has more than one tier, but is not required on a PUT. Known values are: "Free", "Basic",
+ "Standard", and "Premium".
+ :paramtype tier: str or ~azure.mgmt.machinelearningservices.models.SkuTier
"""
super().__init__(**kwargs)
- self.fqdn = fqdn
- self.resource_id = resource_id
- self.notebook_preparation_error = notebook_preparation_error
+ self.capacity = capacity
+ self.family = family
+ self.name = name
+ self.size = size
+ self.tier = tier
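# Editorial sketch (not part of the generated diff): PartialSku combined with the
# PartialMinimalTrackedResourceWithSku wrapper defined earlier in this file, e.g. to scale
# a deployment via an update call. The SKU name and capacity values are illustrative only.
from azure.mgmt.machinelearningservices.models import (
    PartialMinimalTrackedResourceWithSku,
    PartialSku,
)

scale_update = PartialMinimalTrackedResourceWithSku(
    sku=PartialSku(name="Standard_DS3_v2", tier="Standard", capacity=3),
    tags={"autoscale": "manual"},  # placeholder tag
)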
-class Objective(_serialization.Model):
- """Optimization objective.
+class Password(_serialization.Model):
+ """Password.
- All required parameters must be populated in order to send to Azure.
+ Variables are only populated by the server, and will be ignored when sending a request.
- :ivar goal: [Required] Defines supported metric goals for hyperparameter tuning. Required.
- Known values are: "Minimize" and "Maximize".
- :vartype goal: str or ~azure.mgmt.machinelearningservices.models.Goal
- :ivar primary_metric: [Required] Name of the metric to optimize. Required.
- :vartype primary_metric: str
+ :ivar name:
+ :vartype name: str
+ :ivar value:
+ :vartype value: str
"""
_validation = {
- "goal": {"required": True},
- "primary_metric": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "name": {"readonly": True},
+ "value": {"readonly": True},
}
_attribute_map = {
- "goal": {"key": "goal", "type": "str"},
- "primary_metric": {"key": "primaryMetric", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "value": {"key": "value", "type": "str"},
}
- def __init__(self, *, goal: Union[str, "_models.Goal"], primary_metric: str, **kwargs: Any) -> None:
- """
- :keyword goal: [Required] Defines supported metric goals for hyperparameter tuning. Required.
- Known values are: "Minimize" and "Maximize".
- :paramtype goal: str or ~azure.mgmt.machinelearningservices.models.Goal
- :keyword primary_metric: [Required] Name of the metric to optimize. Required.
- :paramtype primary_metric: str
- """
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
super().__init__(**kwargs)
- self.goal = goal
- self.primary_metric = primary_metric
+ self.name = None
+ self.value = None
-class OnlineDeployment(TrackedResource):
- """OnlineDeployment.
+class PATAuthTypeWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2):
+ """PATAuthTypeWorkspaceConnectionProperties.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
- :ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
- :vartype id: str
- :ivar name: The name of the resource.
- :vartype name: str
- :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
- "Microsoft.Storage/storageAccounts".
- :vartype type: str
- :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
- information.
- :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
- :ivar tags: Resource tags.
- :vartype tags: dict[str, str]
- :ivar location: The geo-location where the resource lives. Required.
- :vartype location: str
- :ivar identity: Managed service identity (system assigned and/or user assigned identities).
- :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
- :ivar kind: Metadata used by portal/tooling/etc to render different UX experiences for
- resources of the same type.
- :vartype kind: str
- :ivar properties: [Required] Additional attributes of the entity. Required.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.OnlineDeploymentProperties
- :ivar sku: Sku details required for ARM contract for Autoscaling.
- :vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
+ :ivar auth_type: Authentication type of the connection target. Required. Known values are:
+ "PAT", "ManagedIdentity", "UsernamePassword", "None", "SAS", "ServicePrincipal", "AccessKey",
+ "ApiKey", and "CustomKeys".
+ :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
+ :ivar category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI",
+ "CognitiveSearch", "CognitiveService", and "CustomKeys".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :ivar created_by_workspace_arm_id: The arm id of the workspace which created this connection.
+ :vartype created_by_workspace_arm_id: str
+ :ivar expiry_time:
+ :vartype expiry_time: ~datetime.datetime
+ :ivar is_shared_to_all: Whether this connection will be shared to all the project workspaces
+ under the hub.
+ :vartype is_shared_to_all: bool
+ :ivar metadata: Any object.
+ :vartype metadata: JSON
+ :ivar target:
+ :vartype target: str
+ :ivar credentials:
+ :vartype credentials:
+ ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPersonalAccessToken
"""
_validation = {
- "id": {"readonly": True},
- "name": {"readonly": True},
- "type": {"readonly": True},
- "system_data": {"readonly": True},
- "location": {"required": True},
- "properties": {"required": True},
+ "auth_type": {"required": True},
+ "created_by_workspace_arm_id": {"readonly": True},
}
_attribute_map = {
- "id": {"key": "id", "type": "str"},
- "name": {"key": "name", "type": "str"},
- "type": {"key": "type", "type": "str"},
- "system_data": {"key": "systemData", "type": "SystemData"},
- "tags": {"key": "tags", "type": "{str}"},
- "location": {"key": "location", "type": "str"},
- "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
- "kind": {"key": "kind", "type": "str"},
- "properties": {"key": "properties", "type": "OnlineDeploymentProperties"},
- "sku": {"key": "sku", "type": "Sku"},
+ "auth_type": {"key": "authType", "type": "str"},
+ "category": {"key": "category", "type": "str"},
+ "created_by_workspace_arm_id": {"key": "createdByWorkspaceArmId", "type": "str"},
+ "expiry_time": {"key": "expiryTime", "type": "iso-8601"},
+ "is_shared_to_all": {"key": "isSharedToAll", "type": "bool"},
+ "metadata": {"key": "metadata", "type": "object"},
+ "target": {"key": "target", "type": "str"},
+ "credentials": {"key": "credentials", "type": "WorkspaceConnectionPersonalAccessToken"},
}
def __init__(
self,
*,
- location: str,
- properties: "_models.OnlineDeploymentProperties",
- tags: Optional[Dict[str, str]] = None,
- identity: Optional["_models.ManagedServiceIdentity"] = None,
- kind: Optional[str] = None,
- sku: Optional["_models.Sku"] = None,
+ category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
+ expiry_time: Optional[datetime.datetime] = None,
+ is_shared_to_all: Optional[bool] = None,
+ metadata: Optional[JSON] = None,
+ target: Optional[str] = None,
+ credentials: Optional["_models.WorkspaceConnectionPersonalAccessToken"] = None,
**kwargs: Any
) -> None:
"""
- :keyword tags: Resource tags.
- :paramtype tags: dict[str, str]
- :keyword location: The geo-location where the resource lives. Required.
- :paramtype location: str
- :keyword identity: Managed service identity (system assigned and/or user assigned identities).
- :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
- :keyword kind: Metadata used by portal/tooling/etc to render different UX experiences for
- resources of the same type.
- :paramtype kind: str
- :keyword properties: [Required] Additional attributes of the entity. Required.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.OnlineDeploymentProperties
- :keyword sku: Sku details required for ARM contract for Autoscaling.
- :paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
+ :keyword category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI",
+ "CognitiveSearch", "CognitiveService", and "CustomKeys".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :keyword expiry_time:
+ :paramtype expiry_time: ~datetime.datetime
+ :keyword is_shared_to_all: Whether this connection will be shared to all the project
+ workspaces under the hub.
+ :paramtype is_shared_to_all: bool
+ :keyword metadata: Any object.
+ :paramtype metadata: JSON
+ :keyword target:
+ :paramtype target: str
+ :keyword credentials:
+ :paramtype credentials:
+ ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPersonalAccessToken
"""
- super().__init__(tags=tags, location=location, **kwargs)
- self.identity = identity
- self.kind = kind
- self.properties = properties
- self.sku = sku
+ super().__init__(
+ category=category,
+ expiry_time=expiry_time,
+ is_shared_to_all=is_shared_to_all,
+ metadata=metadata,
+ target=target,
+ **kwargs
+ )
+ self.auth_type: str = "PAT"
+ self.credentials = credentials
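# Editorial sketch (not part of the generated diff): a PAT-based workspace connection
# payload. The target URL is a placeholder, and the WorkspaceConnectionPersonalAccessToken
# credentials object (defined elsewhere in this module) is omitted here for brevity.
from azure.mgmt.machinelearningservices.models import PATAuthTypeWorkspaceConnectionProperties

pat_connection_properties = PATAuthTypeWorkspaceConnectionProperties(
    category="Git",
    target="https://dev.azure.com/<org>/<project>",  # placeholder target
    is_shared_to_all=False,
)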
-class OnlineDeploymentTrackedResourceArmPaginatedResult(_serialization.Model):
- """A paginated list of OnlineDeployment entities.
+class PendingUploadCredentialDto(_serialization.Model):
+ """PendingUploadCredentialDto.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ SASCredentialDto
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar credential_type: [Required] Credential type used to authentication with storage.
+ Required. "SAS"
+ :vartype credential_type: str or
+ ~azure.mgmt.machinelearningservices.models.PendingUploadCredentialType
+ """
+
+ _validation = {
+ "credential_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "credential_type": {"key": "credentialType", "type": "str"},
+ }
+
+ _subtype_map = {"credential_type": {"SAS": "SASCredentialDto"}}
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.credential_type: Optional[str] = None
+
+
+class PendingUploadRequestDto(_serialization.Model):
+ """PendingUploadRequestDto.
- :ivar next_link: The link to the next page of OnlineDeployment objects. If null, there are no
- additional pages.
- :vartype next_link: str
- :ivar value: An array of objects of type OnlineDeployment.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.OnlineDeployment]
+ :ivar pending_upload_id: If PendingUploadId is null, a random GUID will be used.
+ :vartype pending_upload_id: str
+ :ivar pending_upload_type: TemporaryBlobReference is the only supported type. Known values are:
+ "None" and "TemporaryBlobReference".
+ :vartype pending_upload_type: str or
+ ~azure.mgmt.machinelearningservices.models.PendingUploadType
"""
_attribute_map = {
- "next_link": {"key": "nextLink", "type": "str"},
- "value": {"key": "value", "type": "[OnlineDeployment]"},
+ "pending_upload_id": {"key": "pendingUploadId", "type": "str"},
+ "pending_upload_type": {"key": "pendingUploadType", "type": "str"},
}
def __init__(
self,
*,
- next_link: Optional[str] = None,
- value: Optional[List["_models.OnlineDeployment"]] = None,
+ pending_upload_id: Optional[str] = None,
+ pending_upload_type: Optional[Union[str, "_models.PendingUploadType"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword next_link: The link to the next page of OnlineDeployment objects. If null, there are
- no additional pages.
- :paramtype next_link: str
- :keyword value: An array of objects of type OnlineDeployment.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.OnlineDeployment]
+ :keyword pending_upload_id: If PendingUploadId is null, a random GUID will be used.
+ :paramtype pending_upload_id: str
+ :keyword pending_upload_type: TemporaryBlobReference is the only supported type. Known values
+ are: "None" and "TemporaryBlobReference".
+ :paramtype pending_upload_type: str or
+ ~azure.mgmt.machinelearningservices.models.PendingUploadType
"""
super().__init__(**kwargs)
- self.next_link = next_link
- self.value = value
-
-
-class OnlineEndpoint(TrackedResource):
- """OnlineEndpoint.
+ self.pending_upload_id = pending_upload_id
+ self.pending_upload_type = pending_upload_type
- Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+class PendingUploadResponseDto(_serialization.Model):
+ """PendingUploadResponseDto.
- :ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
- :vartype id: str
- :ivar name: The name of the resource.
- :vartype name: str
- :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
- "Microsoft.Storage/storageAccounts".
- :vartype type: str
- :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
- information.
- :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
- :ivar tags: Resource tags.
- :vartype tags: dict[str, str]
- :ivar location: The geo-location where the resource lives. Required.
- :vartype location: str
- :ivar identity: Managed service identity (system assigned and/or user assigned identities).
- :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
- :ivar kind: Metadata used by portal/tooling/etc to render different UX experiences for
- resources of the same type.
- :vartype kind: str
- :ivar properties: [Required] Additional attributes of the entity. Required.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.OnlineEndpointProperties
- :ivar sku: Sku details required for ARM contract for Autoscaling.
- :vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
+ :ivar blob_reference_for_consumption: Container level read, write, list SAS.
+ :vartype blob_reference_for_consumption:
+ ~azure.mgmt.machinelearningservices.models.BlobReferenceForConsumptionDto
+ :ivar pending_upload_id: ID for this upload request.
+ :vartype pending_upload_id: str
+ :ivar pending_upload_type: TemporaryBlobReference is the only supported type. Known values are:
+ "None" and "TemporaryBlobReference".
+ :vartype pending_upload_type: str or
+ ~azure.mgmt.machinelearningservices.models.PendingUploadType
"""
- _validation = {
- "id": {"readonly": True},
- "name": {"readonly": True},
- "type": {"readonly": True},
- "system_data": {"readonly": True},
- "location": {"required": True},
- "properties": {"required": True},
- }
-
_attribute_map = {
- "id": {"key": "id", "type": "str"},
- "name": {"key": "name", "type": "str"},
- "type": {"key": "type", "type": "str"},
- "system_data": {"key": "systemData", "type": "SystemData"},
- "tags": {"key": "tags", "type": "{str}"},
- "location": {"key": "location", "type": "str"},
- "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
- "kind": {"key": "kind", "type": "str"},
- "properties": {"key": "properties", "type": "OnlineEndpointProperties"},
- "sku": {"key": "sku", "type": "Sku"},
+ "blob_reference_for_consumption": {
+ "key": "blobReferenceForConsumption",
+ "type": "BlobReferenceForConsumptionDto",
+ },
+ "pending_upload_id": {"key": "pendingUploadId", "type": "str"},
+ "pending_upload_type": {"key": "pendingUploadType", "type": "str"},
}
def __init__(
self,
*,
- location: str,
- properties: "_models.OnlineEndpointProperties",
- tags: Optional[Dict[str, str]] = None,
- identity: Optional["_models.ManagedServiceIdentity"] = None,
- kind: Optional[str] = None,
- sku: Optional["_models.Sku"] = None,
+ blob_reference_for_consumption: Optional["_models.BlobReferenceForConsumptionDto"] = None,
+ pending_upload_id: Optional[str] = None,
+ pending_upload_type: Optional[Union[str, "_models.PendingUploadType"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword tags: Resource tags.
- :paramtype tags: dict[str, str]
- :keyword location: The geo-location where the resource lives. Required.
- :paramtype location: str
- :keyword identity: Managed service identity (system assigned and/or user assigned identities).
- :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
- :keyword kind: Metadata used by portal/tooling/etc to render different UX experiences for
- resources of the same type.
- :paramtype kind: str
- :keyword properties: [Required] Additional attributes of the entity. Required.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.OnlineEndpointProperties
- :keyword sku: Sku details required for ARM contract for Autoscaling.
- :paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
+ :keyword blob_reference_for_consumption: Container level read, write, list SAS.
+ :paramtype blob_reference_for_consumption:
+ ~azure.mgmt.machinelearningservices.models.BlobReferenceForConsumptionDto
+ :keyword pending_upload_id: ID for this upload request.
+ :paramtype pending_upload_id: str
+ :keyword pending_upload_type: TemporaryBlobReference is the only supported type. Known values
+ are: "None" and "TemporaryBlobReference".
+ :paramtype pending_upload_type: str or
+ ~azure.mgmt.machinelearningservices.models.PendingUploadType
"""
- super().__init__(tags=tags, location=location, **kwargs)
- self.identity = identity
- self.kind = kind
- self.properties = properties
- self.sku = sku
+ super().__init__(**kwargs)
+ self.blob_reference_for_consumption = blob_reference_for_consumption
+ self.pending_upload_id = pending_upload_id
+ self.pending_upload_type = pending_upload_type
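# Editorial sketch (not part of the generated diff): requesting a temporary blob reference
# for an upload. PendingUploadRequestDto is what the caller sends; PendingUploadResponseDto
# (above) is what the service returns, including the SAS-scoped blob reference.
from azure.mgmt.machinelearningservices.models import PendingUploadRequestDto

pending_upload_request = PendingUploadRequestDto(
    pending_upload_type="TemporaryBlobReference",
    # pending_upload_id omitted: per the docstring, a random GUID is generated when null.
)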
-class OnlineEndpointProperties(EndpointPropertiesBase): # pylint: disable=too-many-instance-attributes
- """Online endpoint configuration.
+class PersonalComputeInstanceSettings(_serialization.Model):
+ """Settings for a personal compute instance.
+
+ :ivar assigned_user: A user explicitly assigned to a personal compute instance.
+ :vartype assigned_user: ~azure.mgmt.machinelearningservices.models.AssignedUser
+ """
+
+ _attribute_map = {
+ "assigned_user": {"key": "assignedUser", "type": "AssignedUser"},
+ }
+
+ def __init__(self, *, assigned_user: Optional["_models.AssignedUser"] = None, **kwargs: Any) -> None:
+ """
+ :keyword assigned_user: A user explicitly assigned to a personal compute instance.
+ :paramtype assigned_user: ~azure.mgmt.machinelearningservices.models.AssignedUser
+ """
+ super().__init__(**kwargs)
+ self.assigned_user = assigned_user
+
+
+class PipelineJob(JobBaseProperties): # pylint: disable=too-many-instance-attributes
+ """Pipeline Job definition: defines generic to MFE attributes.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
- :ivar auth_mode: [Required] Use 'Key' for key based authentication and 'AMLToken' for Azure
- Machine Learning token-based authentication. 'Key' doesn't expire but 'AMLToken' does.
- Required. Known values are: "AMLToken", "Key", and "AADToken".
- :vartype auth_mode: str or ~azure.mgmt.machinelearningservices.models.EndpointAuthMode
- :ivar description: Description of the inference endpoint.
+ :ivar description: The asset description text.
:vartype description: str
- :ivar keys: EndpointAuthKeys to set initially on an Endpoint.
- This property will always be returned as null. AuthKey values must be retrieved using the
- ListKeys API.
- :vartype keys: ~azure.mgmt.machinelearningservices.models.EndpointAuthKeys
- :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
+ :ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
- :ivar scoring_uri: Endpoint URI.
- :vartype scoring_uri: str
- :ivar swagger_uri: Endpoint Swagger URI.
- :vartype swagger_uri: str
- :ivar compute: ARM resource ID of the compute if it exists.
- optional.
- :vartype compute: str
- :ivar mirror_traffic: Percentage of traffic to be mirrored to each deployment without using
- returned scoring. Traffic values need to sum to utmost 50.
- :vartype mirror_traffic: dict[str, int]
- :ivar provisioning_state: Provisioning state for the endpoint. Known values are: "Creating",
- "Deleting", "Succeeded", "Failed", "Updating", and "Canceled".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.EndpointProvisioningState
- :ivar public_network_access: Set to "Enabled" for endpoints that should allow public access
- when Private Link is enabled. Known values are: "Enabled" and "Disabled".
- :vartype public_network_access: str or
- ~azure.mgmt.machinelearningservices.models.PublicNetworkAccessType
- :ivar traffic: Percentage of traffic from endpoint to divert to each deployment. Traffic values
- need to sum to 100.
- :vartype traffic: dict[str, int]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar component_id: ARM resource ID of the component resource.
+ :vartype component_id: str
+ :ivar compute_id: ARM resource ID of the compute resource.
+ :vartype compute_id: str
+ :ivar display_name: Display name of job.
+ :vartype display_name: str
+ :ivar experiment_name: The name of the experiment the job belongs to. If not set, the job is
+ placed in the "Default" experiment.
+ :vartype experiment_name: str
+ :ivar identity: Identity configuration. If set, this should be one of AmlToken,
+ ManagedIdentity, UserIdentity or null.
+ Defaults to AmlToken if null.
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
+ :ivar is_archived: Is the asset archived?
+ :vartype is_archived: bool
+ :ivar job_type: [Required] Specifies the type of job. Required. Known values are: "AutoML",
+ "Command", "Labeling", "Sweep", "Pipeline", and "Spark".
+ :vartype job_type: str or ~azure.mgmt.machinelearningservices.models.JobType
+ :ivar notification_setting: Notification setting for the job.
+ :vartype notification_setting: ~azure.mgmt.machinelearningservices.models.NotificationSetting
+ :ivar secrets_configuration: Configuration for secrets to be made available during runtime.
+ :vartype secrets_configuration: dict[str,
+ ~azure.mgmt.machinelearningservices.models.SecretConfiguration]
+ :ivar services: List of JobEndpoints.
+ For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
+ :vartype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
+ :ivar status: Status of the job. Known values are: "NotStarted", "Starting", "Provisioning",
+ "Preparing", "Queued", "Running", "Finalizing", "CancelRequested", "Completed", "Failed",
+ "Canceled", "NotResponding", "Paused", "Unknown", and "Scheduled".
+ :vartype status: str or ~azure.mgmt.machinelearningservices.models.JobStatus
+ :ivar inputs: Inputs for the pipeline job.
+ :vartype inputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
+ :ivar jobs: Jobs construct the Pipeline Job.
+ :vartype jobs: dict[str, JSON]
+ :ivar outputs: Outputs for the pipeline job.
+ :vartype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
+ :ivar settings: Pipeline settings, for things like ContinueRunOnStepFailure etc.
+ :vartype settings: JSON
+ :ivar source_job_id: ARM resource ID of source job.
+ :vartype source_job_id: str
"""
_validation = {
- "auth_mode": {"required": True},
- "scoring_uri": {"readonly": True},
- "swagger_uri": {"readonly": True},
- "provisioning_state": {"readonly": True},
+ "job_type": {"required": True},
+ "status": {"readonly": True},
}
_attribute_map = {
- "auth_mode": {"key": "authMode", "type": "str"},
"description": {"key": "description", "type": "str"},
- "keys": {"key": "keys", "type": "EndpointAuthKeys"},
"properties": {"key": "properties", "type": "{str}"},
- "scoring_uri": {"key": "scoringUri", "type": "str"},
- "swagger_uri": {"key": "swaggerUri", "type": "str"},
- "compute": {"key": "compute", "type": "str"},
- "mirror_traffic": {"key": "mirrorTraffic", "type": "{int}"},
- "provisioning_state": {"key": "provisioningState", "type": "str"},
- "public_network_access": {"key": "publicNetworkAccess", "type": "str"},
- "traffic": {"key": "traffic", "type": "{int}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "component_id": {"key": "componentId", "type": "str"},
+ "compute_id": {"key": "computeId", "type": "str"},
+ "display_name": {"key": "displayName", "type": "str"},
+ "experiment_name": {"key": "experimentName", "type": "str"},
+ "identity": {"key": "identity", "type": "IdentityConfiguration"},
+ "is_archived": {"key": "isArchived", "type": "bool"},
+ "job_type": {"key": "jobType", "type": "str"},
+ "notification_setting": {"key": "notificationSetting", "type": "NotificationSetting"},
+ "secrets_configuration": {"key": "secretsConfiguration", "type": "{SecretConfiguration}"},
+ "services": {"key": "services", "type": "{JobService}"},
+ "status": {"key": "status", "type": "str"},
+ "inputs": {"key": "inputs", "type": "{JobInput}"},
+ "jobs": {"key": "jobs", "type": "{object}"},
+ "outputs": {"key": "outputs", "type": "{JobOutput}"},
+ "settings": {"key": "settings", "type": "object"},
+ "source_job_id": {"key": "sourceJobId", "type": "str"},
}
def __init__(
self,
*,
- auth_mode: Union[str, "_models.EndpointAuthMode"],
description: Optional[str] = None,
- keys: Optional["_models.EndpointAuthKeys"] = None,
properties: Optional[Dict[str, str]] = None,
- compute: Optional[str] = None,
- mirror_traffic: Optional[Dict[str, int]] = None,
- public_network_access: Optional[Union[str, "_models.PublicNetworkAccessType"]] = None,
- traffic: Optional[Dict[str, int]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ component_id: Optional[str] = None,
+ compute_id: Optional[str] = None,
+ display_name: Optional[str] = None,
+ experiment_name: str = "Default",
+ identity: Optional["_models.IdentityConfiguration"] = None,
+ is_archived: bool = False,
+ notification_setting: Optional["_models.NotificationSetting"] = None,
+ secrets_configuration: Optional[Dict[str, "_models.SecretConfiguration"]] = None,
+ services: Optional[Dict[str, "_models.JobService"]] = None,
+ inputs: Optional[Dict[str, "_models.JobInput"]] = None,
+ jobs: Optional[Dict[str, JSON]] = None,
+ outputs: Optional[Dict[str, "_models.JobOutput"]] = None,
+ settings: Optional[JSON] = None,
+ source_job_id: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword auth_mode: [Required] Use 'Key' for key based authentication and 'AMLToken' for Azure
- Machine Learning token-based authentication. 'Key' doesn't expire but 'AMLToken' does.
- Required. Known values are: "AMLToken", "Key", and "AADToken".
- :paramtype auth_mode: str or ~azure.mgmt.machinelearningservices.models.EndpointAuthMode
- :keyword description: Description of the inference endpoint.
+ :keyword description: The asset description text.
:paramtype description: str
- :keyword keys: EndpointAuthKeys to set initially on an Endpoint.
- This property will always be returned as null. AuthKey values must be retrieved using the
- ListKeys API.
- :paramtype keys: ~azure.mgmt.machinelearningservices.models.EndpointAuthKeys
- :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
+ :keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
- :keyword compute: ARM resource ID of the compute if it exists.
- optional.
- :paramtype compute: str
- :keyword mirror_traffic: Percentage of traffic to be mirrored to each deployment without using
- returned scoring. Traffic values need to sum to utmost 50.
- :paramtype mirror_traffic: dict[str, int]
- :keyword public_network_access: Set to "Enabled" for endpoints that should allow public access
- when Private Link is enabled. Known values are: "Enabled" and "Disabled".
- :paramtype public_network_access: str or
- ~azure.mgmt.machinelearningservices.models.PublicNetworkAccessType
- :keyword traffic: Percentage of traffic from endpoint to divert to each deployment. Traffic
- values need to sum to 100.
- :paramtype traffic: dict[str, int]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword component_id: ARM resource ID of the component resource.
+ :paramtype component_id: str
+ :keyword compute_id: ARM resource ID of the compute resource.
+ :paramtype compute_id: str
+ :keyword display_name: Display name of job.
+ :paramtype display_name: str
+ :keyword experiment_name: The name of the experiment the job belongs to. If not set, the job is
+ placed in the "Default" experiment.
+ :paramtype experiment_name: str
+ :keyword identity: Identity configuration. If set, this should be one of AmlToken,
+ ManagedIdentity, UserIdentity or null.
+ Defaults to AmlToken if null.
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
+ :keyword is_archived: Is the asset archived?
+ :paramtype is_archived: bool
+ :keyword notification_setting: Notification setting for the job.
+ :paramtype notification_setting: ~azure.mgmt.machinelearningservices.models.NotificationSetting
+ :keyword secrets_configuration: Configuration for secrets to be made available during runtime.
+ :paramtype secrets_configuration: dict[str,
+ ~azure.mgmt.machinelearningservices.models.SecretConfiguration]
+ :keyword services: List of JobEndpoints.
+ For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
+ :paramtype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
+ :keyword inputs: Inputs for the pipeline job.
+ :paramtype inputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
+ :keyword jobs: Jobs construct the Pipeline Job.
+ :paramtype jobs: dict[str, JSON]
+ :keyword outputs: Outputs for the pipeline job.
+ :paramtype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
+ :keyword settings: Pipeline settings, for things like ContinueRunOnStepFailure etc.
+ :paramtype settings: JSON
+ :keyword source_job_id: ARM resource ID of source job.
+ :paramtype source_job_id: str
"""
- super().__init__(auth_mode=auth_mode, description=description, keys=keys, properties=properties, **kwargs)
- self.compute = compute
- self.mirror_traffic = mirror_traffic
- self.provisioning_state = None
- self.public_network_access = public_network_access
- self.traffic = traffic
+ super().__init__(
+ description=description,
+ properties=properties,
+ tags=tags,
+ component_id=component_id,
+ compute_id=compute_id,
+ display_name=display_name,
+ experiment_name=experiment_name,
+ identity=identity,
+ is_archived=is_archived,
+ notification_setting=notification_setting,
+ secrets_configuration=secrets_configuration,
+ services=services,
+ **kwargs
+ )
+ self.job_type: str = "Pipeline"
+ self.inputs = inputs
+ self.jobs = jobs
+ self.outputs = outputs
+ self.settings = settings
+ self.source_job_id = source_job_id
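
For orientation, here is a minimal sketch (not part of the generated diff) of building a PipelineJob and submitting it through the management client. The subscription, resource group, workspace, job name and the child-step payload are placeholders, and the `jobs.create_or_update` parameter names are assumptions to verify against the generated operations for this API version.

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient, models

client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")

# Minimal pipeline: one literal input, one child step expressed as raw JSON,
# and pipeline-level settings. The child-step dict below is illustrative only.
pipeline = models.PipelineJob(
    display_name="sample-pipeline",
    experiment_name="demo",
    inputs={"raw_data": models.LiteralJobInput(value="iris")},
    jobs={"train_step": {"type": "command", "componentId": "<component-arm-id>"}},
    settings={"continue_on_step_failure": False},
)

client.jobs.create_or_update(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
    id="sample-pipeline-job",
    body=models.JobBase(properties=pipeline),
)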
-class OnlineEndpointTrackedResourceArmPaginatedResult(_serialization.Model):
- """A paginated list of OnlineEndpoint entities.
+class PredictionDriftMonitoringSignal(MonitoringSignalBase):
+ """PredictionDriftMonitoringSignal.
- :ivar next_link: The link to the next page of OnlineEndpoint objects. If null, there are no
- additional pages.
- :vartype next_link: str
- :ivar value: An array of objects of type OnlineEndpoint.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.OnlineEndpoint]
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar notification_types: The current notification mode for this signal.
+ :vartype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
+ :vartype properties: dict[str, str]
+ :ivar signal_type: [Required] Specifies the type of signal to monitor. Required. Known values
+ are: "DataDrift", "PredictionDrift", "DataQuality", "FeatureAttributionDrift", "Custom",
+ "ModelPerformance", "GenerationSafetyQuality", and "GenerationTokenStatistics".
+ :vartype signal_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringSignalType
+ :ivar feature_data_type_override: A dictionary that maps feature names to their respective data
+ types.
+ :vartype feature_data_type_override: dict[str, str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType]
+ :ivar metric_thresholds: [Required] A list of metrics to calculate and their associated
+ thresholds. Required.
+ :vartype metric_thresholds:
+ list[~azure.mgmt.machinelearningservices.models.PredictionDriftMetricThresholdBase]
+ :ivar production_data: [Required] The data which drift will be calculated for. Required.
+ :vartype production_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
+ :ivar reference_data: [Required] The data to calculate drift against. Required.
+ :vartype reference_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
"""
- _attribute_map = {
- "next_link": {"key": "nextLink", "type": "str"},
- "value": {"key": "value", "type": "[OnlineEndpoint]"},
+ _validation = {
+ "signal_type": {"required": True},
+ "metric_thresholds": {"required": True},
+ "production_data": {"required": True},
+ "reference_data": {"required": True},
}
- def __init__(
- self, *, next_link: Optional[str] = None, value: Optional[List["_models.OnlineEndpoint"]] = None, **kwargs: Any
- ) -> None:
- """
- :keyword next_link: The link to the next page of OnlineEndpoint objects. If null, there are no
- additional pages.
- :paramtype next_link: str
- :keyword value: An array of objects of type OnlineEndpoint.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.OnlineEndpoint]
- """
- super().__init__(**kwargs)
- self.next_link = next_link
- self.value = value
-
-
-class OnlineRequestSettings(_serialization.Model):
- """Online deployment scoring requests configuration.
-
- :ivar max_concurrent_requests_per_instance: The number of maximum concurrent requests per node
- allowed per deployment. Defaults to 1.
- :vartype max_concurrent_requests_per_instance: int
- :ivar max_queue_wait: The maximum amount of time a request will stay in the queue in ISO 8601
- format.
- Defaults to 500ms.
- :vartype max_queue_wait: ~datetime.timedelta
- :ivar request_timeout: The scoring timeout in ISO 8601 format.
- Defaults to 5000ms.
- :vartype request_timeout: ~datetime.timedelta
- """
-
_attribute_map = {
- "max_concurrent_requests_per_instance": {"key": "maxConcurrentRequestsPerInstance", "type": "int"},
- "max_queue_wait": {"key": "maxQueueWait", "type": "duration"},
- "request_timeout": {"key": "requestTimeout", "type": "duration"},
+ "notification_types": {"key": "notificationTypes", "type": "[str]"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "signal_type": {"key": "signalType", "type": "str"},
+ "feature_data_type_override": {"key": "featureDataTypeOverride", "type": "{str}"},
+ "metric_thresholds": {"key": "metricThresholds", "type": "[PredictionDriftMetricThresholdBase]"},
+ "production_data": {"key": "productionData", "type": "MonitoringInputDataBase"},
+ "reference_data": {"key": "referenceData", "type": "MonitoringInputDataBase"},
}
def __init__(
self,
*,
- max_concurrent_requests_per_instance: int = 1,
- max_queue_wait: datetime.timedelta = "PT0.5S",
- request_timeout: datetime.timedelta = "PT5S",
+ metric_thresholds: List["_models.PredictionDriftMetricThresholdBase"],
+ production_data: "_models.MonitoringInputDataBase",
+ reference_data: "_models.MonitoringInputDataBase",
+ notification_types: Optional[List[Union[str, "_models.MonitoringNotificationType"]]] = None,
+ properties: Optional[Dict[str, str]] = None,
+ feature_data_type_override: Optional[Dict[str, Union[str, "_models.MonitoringFeatureDataType"]]] = None,
**kwargs: Any
) -> None:
"""
- :keyword max_concurrent_requests_per_instance: The number of maximum concurrent requests per
- node allowed per deployment. Defaults to 1.
- :paramtype max_concurrent_requests_per_instance: int
- :keyword max_queue_wait: The maximum amount of time a request will stay in the queue in ISO
- 8601 format.
- Defaults to 500ms.
- :paramtype max_queue_wait: ~datetime.timedelta
- :keyword request_timeout: The scoring timeout in ISO 8601 format.
- Defaults to 5000ms.
- :paramtype request_timeout: ~datetime.timedelta
- """
- super().__init__(**kwargs)
- self.max_concurrent_requests_per_instance = max_concurrent_requests_per_instance
- self.max_queue_wait = max_queue_wait
- self.request_timeout = request_timeout
+ :keyword notification_types: The current notification mode for this signal.
+ :paramtype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
+ :paramtype properties: dict[str, str]
+ :keyword feature_data_type_override: A dictionary that maps feature names to their respective
+ data types.
+ :paramtype feature_data_type_override: dict[str, str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType]
+ :keyword metric_thresholds: [Required] A list of metrics to calculate and their associated
+ thresholds. Required.
+ :paramtype metric_thresholds:
+ list[~azure.mgmt.machinelearningservices.models.PredictionDriftMetricThresholdBase]
+ :keyword production_data: [Required] The data which drift will be calculated for. Required.
+ :paramtype production_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
+ :keyword reference_data: [Required] The data to calculate drift against. Required.
+ :paramtype reference_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
+ """
+ super().__init__(notification_types=notification_types, properties=properties, **kwargs)
+ self.signal_type: str = "PredictionDrift"
+ self.feature_data_type_override = feature_data_type_override
+ self.metric_thresholds = metric_thresholds
+ self.production_data = production_data
+ self.reference_data = reference_data
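
A hedged sketch of how this signal is typically assembled: the concrete metric-threshold and input-data subclasses named below (NumericalPredictionDriftMetricThreshold, MonitoringThreshold, and whichever MonitoringInputDataBase subclasses the caller passes in) are assumptions about this preview surface, not something this diff defines.

from azure.mgmt.machinelearningservices import models

def prediction_drift_signal(
    production_data: models.MonitoringInputDataBase,
    reference_data: models.MonitoringInputDataBase,
) -> models.PredictionDriftMonitoringSignal:
    # The caller supplies concrete MonitoringInputDataBase subclasses for the
    # production and reference datasets. The threshold subclass and metric
    # name used here are assumptions to check against the generated models.
    return models.PredictionDriftMonitoringSignal(
        metric_thresholds=[
            models.NumericalPredictionDriftMetricThreshold(
                metric="JensenShannonDistance",
                threshold=models.MonitoringThreshold(value=0.1),
            )
        ],
        production_data=production_data,
        reference_data=reference_data,
    )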
-class OutputPathAssetReference(AssetReferenceBase):
- """Reference to an asset via its path in a job output.
+class PrivateEndpoint(_serialization.Model):
+ """The Private Endpoint resource.
- All required parameters must be populated in order to send to Azure.
+ Variables are only populated by the server, and will be ignored when sending a request.
- :ivar reference_type: [Required] Specifies the type of asset reference. Required. Known values
- are: "Id", "DataPath", and "OutputPath".
- :vartype reference_type: str or ~azure.mgmt.machinelearningservices.models.ReferenceType
- :ivar job_id: ARM resource ID of the job.
- :vartype job_id: str
- :ivar path: The path of the file/directory in the job output.
- :vartype path: str
+ :ivar id: The ARM identifier for Private Endpoint.
+ :vartype id: str
"""
_validation = {
- "reference_type": {"required": True},
+ "id": {"readonly": True},
}
_attribute_map = {
- "reference_type": {"key": "referenceType", "type": "str"},
- "job_id": {"key": "jobId", "type": "str"},
- "path": {"key": "path", "type": "str"},
+ "id": {"key": "id", "type": "str"},
}
- def __init__(self, *, job_id: Optional[str] = None, path: Optional[str] = None, **kwargs: Any) -> None:
- """
- :keyword job_id: ARM resource ID of the job.
- :paramtype job_id: str
- :keyword path: The path of the file/directory in the job output.
- :paramtype path: str
- """
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
super().__init__(**kwargs)
- self.reference_type: str = "OutputPath"
- self.job_id = job_id
- self.path = path
+ self.id = None
-class PaginatedComputeResourcesList(_serialization.Model):
- """Paginated list of Machine Learning compute objects wrapped in ARM resource envelope.
+class PrivateEndpointConnection(Resource): # pylint: disable=too-many-instance-attributes
+ """The Private Endpoint Connection resource.
- :ivar value: An array of Machine Learning compute objects wrapped in ARM resource envelope.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.ComputeResource]
- :ivar next_link: A continuation link (absolute URI) to the next page of results in the list.
- :vartype next_link: str
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar identity: Managed service identity (system assigned and/or user assigned identities).
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :ivar location: Same as workspace location.
+ :vartype location: str
+ :ivar sku: Optional. This field is required to be implemented by the RP because AML is
+ supporting more than one tier.
+ :vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
+    :ivar tags: Dictionary of :code:`<string>`.

+ :vartype tags: dict[str, str]
+ :ivar private_endpoint: The Private Endpoint resource.
+ :vartype private_endpoint:
+ ~azure.mgmt.machinelearningservices.models.WorkspacePrivateEndpointResource
+ :ivar private_link_service_connection_state: The connection state.
+ :vartype private_link_service_connection_state:
+ ~azure.mgmt.machinelearningservices.models.PrivateLinkServiceConnectionState
+ :ivar provisioning_state: The current provisioning state. Known values are: "Succeeded",
+ "Creating", "Deleting", and "Failed".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.PrivateEndpointConnectionProvisioningState
"""
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ }
+
_attribute_map = {
- "value": {"key": "value", "type": "[ComputeResource]"},
- "next_link": {"key": "nextLink", "type": "str"},
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
+ "location": {"key": "location", "type": "str"},
+ "sku": {"key": "sku", "type": "Sku"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "private_endpoint": {"key": "properties.privateEndpoint", "type": "WorkspacePrivateEndpointResource"},
+ "private_link_service_connection_state": {
+ "key": "properties.privateLinkServiceConnectionState",
+ "type": "PrivateLinkServiceConnectionState",
+ },
+ "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
}
def __init__(
- self, *, value: Optional[List["_models.ComputeResource"]] = None, next_link: Optional[str] = None, **kwargs: Any
+ self,
+ *,
+ identity: Optional["_models.ManagedServiceIdentity"] = None,
+ location: Optional[str] = None,
+ sku: Optional["_models.Sku"] = None,
+ tags: Optional[Dict[str, str]] = None,
+ private_endpoint: Optional["_models.WorkspacePrivateEndpointResource"] = None,
+ private_link_service_connection_state: Optional["_models.PrivateLinkServiceConnectionState"] = None,
+ provisioning_state: Optional[Union[str, "_models.PrivateEndpointConnectionProvisioningState"]] = None,
+ **kwargs: Any
) -> None:
"""
- :keyword value: An array of Machine Learning compute objects wrapped in ARM resource envelope.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.ComputeResource]
- :keyword next_link: A continuation link (absolute URI) to the next page of results in the list.
- :paramtype next_link: str
+ :keyword identity: Managed service identity (system assigned and/or user assigned identities).
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :keyword location: Same as workspace location.
+ :paramtype location: str
+ :keyword sku: Optional. This field is required to be implemented by the RP because AML is
+ supporting more than one tier.
+ :paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
+    :keyword tags: Dictionary of :code:`<string>`.
+ :paramtype tags: dict[str, str]
+ :keyword private_endpoint: The Private Endpoint resource.
+ :paramtype private_endpoint:
+ ~azure.mgmt.machinelearningservices.models.WorkspacePrivateEndpointResource
+ :keyword private_link_service_connection_state: The connection state.
+ :paramtype private_link_service_connection_state:
+ ~azure.mgmt.machinelearningservices.models.PrivateLinkServiceConnectionState
+ :keyword provisioning_state: The current provisioning state. Known values are: "Succeeded",
+ "Creating", "Deleting", and "Failed".
+ :paramtype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.PrivateEndpointConnectionProvisioningState
"""
super().__init__(**kwargs)
- self.value = value
- self.next_link = next_link
+ self.identity = identity
+ self.location = location
+ self.sku = sku
+ self.tags = tags
+ self.private_endpoint = private_endpoint
+ self.private_link_service_connection_state = private_link_service_connection_state
+ self.provisioning_state = provisioning_state
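
An illustrative sketch (not from this diff) of approving a pending connection by writing back a PrivateEndpointConnection whose connection state is "Approved". The connection name is a placeholder, and the operation-group and positional parameter order are assumptions to verify against the generated PrivateEndpointConnectionsOperations.

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient, models

client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")

# Approve a pending private endpoint connection on the workspace.
approval = models.PrivateEndpointConnection(
    private_link_service_connection_state=models.PrivateLinkServiceConnectionState(
        status="Approved",
        description="Approved by the workspace administrator.",
    )
)
client.private_endpoint_connections.create_or_update(
    "<resource-group>",
    "<workspace-name>",
    "<connection-name>",
    approval,
)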
-class PartialBatchDeployment(_serialization.Model):
- """Mutable batch inference settings per deployment.
+class PrivateEndpointConnectionListResult(_serialization.Model):
+ """List of private endpoint connection associated with the specified workspace.
- :ivar description: Description of the endpoint deployment.
- :vartype description: str
+ :ivar value: Array of private endpoint connections.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection]
"""
_attribute_map = {
- "description": {"key": "description", "type": "str"},
+ "value": {"key": "value", "type": "[PrivateEndpointConnection]"},
}
- def __init__(self, *, description: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(self, *, value: Optional[List["_models.PrivateEndpointConnection"]] = None, **kwargs: Any) -> None:
"""
- :keyword description: Description of the endpoint deployment.
- :paramtype description: str
+ :keyword value: Array of private endpoint connections.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection]
"""
super().__init__(**kwargs)
- self.description = description
+ self.value = value
-class PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties(_serialization.Model):
- """Strictly used in update requests.
+class PrivateEndpointDestination(_serialization.Model):
+ """Private Endpoint destination for a Private Endpoint Outbound Rule for the managed network of a
+ machine learning workspace.
- :ivar properties: Additional attributes of the entity.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.PartialBatchDeployment
- :ivar tags: Resource tags.
- :vartype tags: dict[str, str]
+ :ivar service_resource_id:
+ :vartype service_resource_id: str
+ :ivar spark_enabled:
+ :vartype spark_enabled: bool
+ :ivar spark_status: Type of a managed network Outbound Rule of a machine learning workspace.
+ Known values are: "Inactive" and "Active".
+ :vartype spark_status: str or ~azure.mgmt.machinelearningservices.models.RuleStatus
+ :ivar subresource_target:
+ :vartype subresource_target: str
"""
_attribute_map = {
- "properties": {"key": "properties", "type": "PartialBatchDeployment"},
- "tags": {"key": "tags", "type": "{str}"},
+ "service_resource_id": {"key": "serviceResourceId", "type": "str"},
+ "spark_enabled": {"key": "sparkEnabled", "type": "bool"},
+ "spark_status": {"key": "sparkStatus", "type": "str"},
+ "subresource_target": {"key": "subresourceTarget", "type": "str"},
}
def __init__(
self,
*,
- properties: Optional["_models.PartialBatchDeployment"] = None,
- tags: Optional[Dict[str, str]] = None,
+ service_resource_id: Optional[str] = None,
+ spark_enabled: Optional[bool] = None,
+ spark_status: Optional[Union[str, "_models.RuleStatus"]] = None,
+ subresource_target: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword properties: Additional attributes of the entity.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.PartialBatchDeployment
- :keyword tags: Resource tags.
- :paramtype tags: dict[str, str]
+ :keyword service_resource_id:
+ :paramtype service_resource_id: str
+ :keyword spark_enabled:
+ :paramtype spark_enabled: bool
+ :keyword spark_status: Type of a managed network Outbound Rule of a machine learning workspace.
+ Known values are: "Inactive" and "Active".
+ :paramtype spark_status: str or ~azure.mgmt.machinelearningservices.models.RuleStatus
+ :keyword subresource_target:
+ :paramtype subresource_target: str
"""
super().__init__(**kwargs)
- self.properties = properties
- self.tags = tags
+ self.service_resource_id = service_resource_id
+ self.spark_enabled = spark_enabled
+ self.spark_status = spark_status
+ self.subresource_target = subresource_target
-class PartialManagedServiceIdentity(_serialization.Model):
- """Managed service identity (system assigned and/or user assigned identities).
+class PrivateEndpointOutboundRule(OutboundRule):
+ """Private Endpoint Outbound Rule for the managed network of a machine learning workspace.
- :ivar type: Managed service identity (system assigned and/or user assigned identities). Known
- values are: "None", "SystemAssigned", "UserAssigned", and "SystemAssigned,UserAssigned".
- :vartype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
- :ivar user_assigned_identities: The set of user assigned identities associated with the
- resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
- '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}.
- The dictionary values can be empty objects ({}) in requests.
- :vartype user_assigned_identities: dict[str, JSON]
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar category: Category of a managed network outbound rule of a machine learning workspace.
+ Known values are: "Required", "Recommended", and "UserDefined".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.RuleCategory
+ :ivar status: Type of a managed network outbound rule of a machine learning workspace. Known
+ values are: "Inactive" and "Active".
+ :vartype status: str or ~azure.mgmt.machinelearningservices.models.RuleStatus
+ :ivar type: Type of a managed network outbound rule of a machine learning workspace. Required.
+ Known values are: "FQDN", "PrivateEndpoint", and "ServiceTag".
+ :vartype type: str or ~azure.mgmt.machinelearningservices.models.RuleType
+ :ivar destination: Private Endpoint destination for a Private Endpoint Outbound Rule for the
+ managed network of a machine learning workspace.
+ :vartype destination: ~azure.mgmt.machinelearningservices.models.PrivateEndpointDestination
"""
+ _validation = {
+ "type": {"required": True},
+ }
+
_attribute_map = {
+ "category": {"key": "category", "type": "str"},
+ "status": {"key": "status", "type": "str"},
"type": {"key": "type", "type": "str"},
- "user_assigned_identities": {"key": "userAssignedIdentities", "type": "{object}"},
+ "destination": {"key": "destination", "type": "PrivateEndpointDestination"},
}
def __init__(
self,
*,
- type: Optional[Union[str, "_models.ManagedServiceIdentityType"]] = None,
- user_assigned_identities: Optional[Dict[str, JSON]] = None,
+ category: Optional[Union[str, "_models.RuleCategory"]] = None,
+ status: Optional[Union[str, "_models.RuleStatus"]] = None,
+ destination: Optional["_models.PrivateEndpointDestination"] = None,
**kwargs: Any
) -> None:
"""
- :keyword type: Managed service identity (system assigned and/or user assigned identities).
- Known values are: "None", "SystemAssigned", "UserAssigned", and "SystemAssigned,UserAssigned".
- :paramtype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
- :keyword user_assigned_identities: The set of user assigned identities associated with the
- resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
- '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}.
- The dictionary values can be empty objects ({}) in requests.
- :paramtype user_assigned_identities: dict[str, JSON]
+ :keyword category: Category of a managed network outbound rule of a machine learning workspace.
+ Known values are: "Required", "Recommended", and "UserDefined".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.RuleCategory
+ :keyword status: Type of a managed network outbound rule of a machine learning workspace. Known
+ values are: "Inactive" and "Active".
+ :paramtype status: str or ~azure.mgmt.machinelearningservices.models.RuleStatus
+ :keyword destination: Private Endpoint destination for a Private Endpoint Outbound Rule for the
+ managed network of a machine learning workspace.
+ :paramtype destination: ~azure.mgmt.machinelearningservices.models.PrivateEndpointDestination
"""
- super().__init__(**kwargs)
- self.type = type
- self.user_assigned_identities = user_assigned_identities
+ super().__init__(category=category, status=status, **kwargs)
+ self.type: str = "PrivateEndpoint"
+ self.destination = destination
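
A short sketch of a user-defined outbound rule that lets the managed network reach a storage account's blob sub-resource over Private Endpoint; the ARM ID is a placeholder, and the ManagedNetworkSettings container mentioned in the trailing comment is an assumption about where such rules are attached in this API version.

from azure.mgmt.machinelearningservices import models

rule = models.PrivateEndpointOutboundRule(
    category="UserDefined",
    destination=models.PrivateEndpointDestination(
        service_resource_id=(
            "/subscriptions/<sub>/resourceGroups/<rg>/providers/"
            "Microsoft.Storage/storageAccounts/<account>"
        ),
        subresource_target="blob",
        spark_enabled=False,
    ),
)
# Typically attached to a workspace's managed network, e.g. as one entry in
# ManagedNetworkSettings(outbound_rules={"allow-storage": rule}); that model
# name is an assumption about this preview API version.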
-class PartialMinimalTrackedResource(_serialization.Model):
- """Strictly used in update requests.
+class PrivateEndpointResource(PrivateEndpoint):
+ """The PE network resource that is linked to this PE connection.
- :ivar tags: Resource tags.
- :vartype tags: dict[str, str]
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar id: The ARM identifier for Private Endpoint.
+ :vartype id: str
+ :ivar subnet_arm_id: The subnetId that the private endpoint is connected to.
+ :vartype subnet_arm_id: str
"""
+ _validation = {
+ "id": {"readonly": True},
+ }
+
_attribute_map = {
- "tags": {"key": "tags", "type": "{str}"},
+ "id": {"key": "id", "type": "str"},
+ "subnet_arm_id": {"key": "subnetArmId", "type": "str"},
}
- def __init__(self, *, tags: Optional[Dict[str, str]] = None, **kwargs: Any) -> None:
+ def __init__(self, *, subnet_arm_id: Optional[str] = None, **kwargs: Any) -> None:
"""
- :keyword tags: Resource tags.
- :paramtype tags: dict[str, str]
+ :keyword subnet_arm_id: The subnetId that the private endpoint is connected to.
+ :paramtype subnet_arm_id: str
"""
super().__init__(**kwargs)
- self.tags = tags
+ self.subnet_arm_id = subnet_arm_id
-class PartialMinimalTrackedResourceWithIdentity(PartialMinimalTrackedResource):
- """Strictly used in update requests.
+class PrivateLinkResource(Resource): # pylint: disable=too-many-instance-attributes
+ """A private link resource.
- :ivar tags: Resource tags.
- :vartype tags: dict[str, str]
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
:ivar identity: Managed service identity (system assigned and/or user assigned identities).
- :vartype identity: ~azure.mgmt.machinelearningservices.models.PartialManagedServiceIdentity
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :ivar location: Same as workspace location.
+ :vartype location: str
+ :ivar sku: Optional. This field is required to be implemented by the RP because AML is
+ supporting more than one tier.
+ :vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
+    :ivar tags: Dictionary of :code:`<string>`.
+ :vartype tags: dict[str, str]
+ :ivar group_id: The private link resource group id.
+ :vartype group_id: str
+ :ivar required_members: The private link resource required member names.
+ :vartype required_members: list[str]
+ :ivar required_zone_names: The private link resource Private link DNS zone name.
+ :vartype required_zone_names: list[str]
"""
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ }
+
_attribute_map = {
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
+ "location": {"key": "location", "type": "str"},
+ "sku": {"key": "sku", "type": "Sku"},
"tags": {"key": "tags", "type": "{str}"},
- "identity": {"key": "identity", "type": "PartialManagedServiceIdentity"},
+ "group_id": {"key": "properties.groupId", "type": "str"},
+ "required_members": {"key": "properties.requiredMembers", "type": "[str]"},
+ "required_zone_names": {"key": "properties.requiredZoneNames", "type": "[str]"},
}
def __init__(
self,
*,
+ identity: Optional["_models.ManagedServiceIdentity"] = None,
+ location: Optional[str] = None,
+ sku: Optional["_models.Sku"] = None,
tags: Optional[Dict[str, str]] = None,
- identity: Optional["_models.PartialManagedServiceIdentity"] = None,
+ group_id: Optional[str] = None,
+ required_members: Optional[List[str]] = None,
+ required_zone_names: Optional[List[str]] = None,
**kwargs: Any
) -> None:
"""
- :keyword tags: Resource tags.
- :paramtype tags: dict[str, str]
:keyword identity: Managed service identity (system assigned and/or user assigned identities).
- :paramtype identity: ~azure.mgmt.machinelearningservices.models.PartialManagedServiceIdentity
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :keyword location: Same as workspace location.
+ :paramtype location: str
+ :keyword sku: Optional. This field is required to be implemented by the RP because AML is
+ supporting more than one tier.
+ :paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
+    :keyword tags: Dictionary of :code:`<string>`.
+ :paramtype tags: dict[str, str]
+ :keyword group_id: The private link resource group id.
+ :paramtype group_id: str
+ :keyword required_members: The private link resource required member names.
+ :paramtype required_members: list[str]
+ :keyword required_zone_names: The private link resource Private link DNS zone name.
+ :paramtype required_zone_names: list[str]
"""
- super().__init__(tags=tags, **kwargs)
+ super().__init__(**kwargs)
self.identity = identity
+ self.location = location
+ self.sku = sku
+ self.tags = tags
+ self.group_id = group_id
+ self.required_members = required_members
+ self.required_zone_names = required_zone_names
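
A usage sketch (not from this diff) for enumerating the private link resources a workspace exposes, for example to discover group IDs and required DNS zones before creating a private endpoint. Whether `private_link_resources.list` returns a plain PrivateLinkResourceListResult or a pageable varies by SDK version, so both shapes are handled defensively.

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")

result = client.private_link_resources.list("<resource-group>", "<workspace-name>")
# Unwrap the list result if the operation returns the wrapper model; otherwise
# iterate the returned collection directly.
resources = getattr(result, "value", result) or []
for resource in resources:
    print(resource.group_id, resource.required_members, resource.required_zone_names)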
-class PartialMinimalTrackedResourceWithSku(PartialMinimalTrackedResource):
- """Strictly used in update requests.
+class PrivateLinkResourceListResult(_serialization.Model):
+ """A list of private link resources.
- :ivar tags: Resource tags.
- :vartype tags: dict[str, str]
- :ivar sku: Sku details required for ARM contract for Autoscaling.
- :vartype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
+ :ivar value:
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.PrivateLinkResource]
"""
_attribute_map = {
- "tags": {"key": "tags", "type": "{str}"},
- "sku": {"key": "sku", "type": "PartialSku"},
+ "value": {"key": "value", "type": "[PrivateLinkResource]"},
}
- def __init__(
- self, *, tags: Optional[Dict[str, str]] = None, sku: Optional["_models.PartialSku"] = None, **kwargs: Any
- ) -> None:
+ def __init__(self, *, value: Optional[List["_models.PrivateLinkResource"]] = None, **kwargs: Any) -> None:
"""
- :keyword tags: Resource tags.
- :paramtype tags: dict[str, str]
- :keyword sku: Sku details required for ARM contract for Autoscaling.
- :paramtype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
+ :keyword value:
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.PrivateLinkResource]
"""
- super().__init__(tags=tags, **kwargs)
- self.sku = sku
+ super().__init__(**kwargs)
+ self.value = value
-class PartialRegistryPartialTrackedResource(_serialization.Model):
- """Strictly used in update requests.
+class PrivateLinkServiceConnectionState(_serialization.Model):
+ """A collection of information about the state of the connection between service consumer and
+ provider.
- :ivar identity: Managed service identity (system assigned and/or user assigned identities).
- :vartype identity:
- ~azure.mgmt.machinelearningservices.models.RegistryPartialManagedServiceIdentity
- :ivar sku: Sku details required for ARM contract for Autoscaling.
- :vartype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
- :ivar tags: Resource tags.
- :vartype tags: dict[str, str]
+ :ivar actions_required: Some RP chose "None". Other RPs use this for region expansion.
+ :vartype actions_required: str
+ :ivar description: User-defined message that, per NRP doc, may be used for approval-related
+ message.
+ :vartype description: str
+ :ivar status: Connection status of the service consumer with the service provider. Known values
+ are: "Approved", "Pending", "Rejected", "Disconnected", and "Timeout".
+ :vartype status: str or
+ ~azure.mgmt.machinelearningservices.models.EndpointServiceConnectionStatus
"""
_attribute_map = {
- "identity": {"key": "identity", "type": "RegistryPartialManagedServiceIdentity"},
- "sku": {"key": "sku", "type": "PartialSku"},
- "tags": {"key": "tags", "type": "{str}"},
+ "actions_required": {"key": "actionsRequired", "type": "str"},
+ "description": {"key": "description", "type": "str"},
+ "status": {"key": "status", "type": "str"},
}
def __init__(
self,
*,
- identity: Optional["_models.RegistryPartialManagedServiceIdentity"] = None,
- sku: Optional["_models.PartialSku"] = None,
- tags: Optional[Dict[str, str]] = None,
+ actions_required: Optional[str] = None,
+ description: Optional[str] = None,
+ status: Optional[Union[str, "_models.EndpointServiceConnectionStatus"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword identity: Managed service identity (system assigned and/or user assigned identities).
- :paramtype identity:
- ~azure.mgmt.machinelearningservices.models.RegistryPartialManagedServiceIdentity
- :keyword sku: Sku details required for ARM contract for Autoscaling.
- :paramtype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
- :keyword tags: Resource tags.
- :paramtype tags: dict[str, str]
+ :keyword actions_required: Some RP chose "None". Other RPs use this for region expansion.
+ :paramtype actions_required: str
+ :keyword description: User-defined message that, per NRP doc, may be used for approval-related
+ message.
+ :paramtype description: str
+ :keyword status: Connection status of the service consumer with the service provider. Known
+ values are: "Approved", "Pending", "Rejected", "Disconnected", and "Timeout".
+ :paramtype status: str or
+ ~azure.mgmt.machinelearningservices.models.EndpointServiceConnectionStatus
"""
super().__init__(**kwargs)
- self.identity = identity
- self.sku = sku
- self.tags = tags
+ self.actions_required = actions_required
+ self.description = description
+ self.status = status
-class PartialSku(_serialization.Model):
- """Common SKU definition.
+class ProbeSettings(_serialization.Model):
+ """Deployment container liveness/readiness probe configuration.
- :ivar capacity: If the SKU supports scale out/in then the capacity integer should be included.
- If scale out/in is not possible for the resource this may be omitted.
- :vartype capacity: int
- :ivar family: If the service has different generations of hardware, for the same SKU, then that
- can be captured here.
- :vartype family: str
- :ivar name: The name of the SKU. Ex - P3. It is typically a letter+number code.
- :vartype name: str
- :ivar size: The SKU size. When the name field is the combination of tier and some other value,
- this would be the standalone code.
- :vartype size: str
- :ivar tier: This field is required to be implemented by the Resource Provider if the service
- has more than one tier, but is not required on a PUT. Known values are: "Free", "Basic",
- "Standard", and "Premium".
- :vartype tier: str or ~azure.mgmt.machinelearningservices.models.SkuTier
+ :ivar failure_threshold: The number of failures to allow before returning an unhealthy status.
+ :vartype failure_threshold: int
+ :ivar initial_delay: The delay before the first probe in ISO 8601 format.
+ :vartype initial_delay: ~datetime.timedelta
+ :ivar period: The length of time between probes in ISO 8601 format.
+ :vartype period: ~datetime.timedelta
+ :ivar success_threshold: The number of successful probes before returning a healthy status.
+ :vartype success_threshold: int
+ :ivar timeout: The probe timeout in ISO 8601 format.
+ :vartype timeout: ~datetime.timedelta
"""
_attribute_map = {
- "capacity": {"key": "capacity", "type": "int"},
- "family": {"key": "family", "type": "str"},
- "name": {"key": "name", "type": "str"},
- "size": {"key": "size", "type": "str"},
- "tier": {"key": "tier", "type": "str"},
+ "failure_threshold": {"key": "failureThreshold", "type": "int"},
+ "initial_delay": {"key": "initialDelay", "type": "duration"},
+ "period": {"key": "period", "type": "duration"},
+ "success_threshold": {"key": "successThreshold", "type": "int"},
+ "timeout": {"key": "timeout", "type": "duration"},
}
def __init__(
self,
*,
- capacity: Optional[int] = None,
- family: Optional[str] = None,
- name: Optional[str] = None,
- size: Optional[str] = None,
- tier: Optional[Union[str, "_models.SkuTier"]] = None,
+ failure_threshold: int = 30,
+ initial_delay: Optional[datetime.timedelta] = None,
+ period: datetime.timedelta = "PT10S",
+ success_threshold: int = 1,
+ timeout: datetime.timedelta = "PT2S",
**kwargs: Any
) -> None:
"""
- :keyword capacity: If the SKU supports scale out/in then the capacity integer should be
- included. If scale out/in is not possible for the resource this may be omitted.
- :paramtype capacity: int
- :keyword family: If the service has different generations of hardware, for the same SKU, then
- that can be captured here.
- :paramtype family: str
- :keyword name: The name of the SKU. Ex - P3. It is typically a letter+number code.
- :paramtype name: str
- :keyword size: The SKU size. When the name field is the combination of tier and some other
- value, this would be the standalone code.
- :paramtype size: str
- :keyword tier: This field is required to be implemented by the Resource Provider if the service
- has more than one tier, but is not required on a PUT. Known values are: "Free", "Basic",
- "Standard", and "Premium".
- :paramtype tier: str or ~azure.mgmt.machinelearningservices.models.SkuTier
+ :keyword failure_threshold: The number of failures to allow before returning an unhealthy
+ status.
+ :paramtype failure_threshold: int
+ :keyword initial_delay: The delay before the first probe in ISO 8601 format.
+ :paramtype initial_delay: ~datetime.timedelta
+ :keyword period: The length of time between probes in ISO 8601 format.
+ :paramtype period: ~datetime.timedelta
+ :keyword success_threshold: The number of successful probes before returning a healthy status.
+ :paramtype success_threshold: int
+ :keyword timeout: The probe timeout in ISO 8601 format.
+ :paramtype timeout: ~datetime.timedelta
"""
super().__init__(**kwargs)
- self.capacity = capacity
- self.family = family
- self.name = name
- self.size = size
- self.tier = tier
+ self.failure_threshold = failure_threshold
+ self.initial_delay = initial_delay
+ self.period = period
+ self.success_threshold = success_threshold
+ self.timeout = timeout
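
A minimal sketch of a probe configuration: fail after 3 consecutive failures, probing every 10 seconds with a 2-second timeout and a 30-second initial delay. Attaching it to a ManagedOnlineDeployment via `liveness_probe`/`readiness_probe` reflects my understanding of where ProbeSettings is consumed and should be verified against the deployment models.

import datetime

from azure.mgmt.machinelearningservices import models

probe = models.ProbeSettings(
    failure_threshold=3,
    success_threshold=1,
    initial_delay=datetime.timedelta(seconds=30),
    period=datetime.timedelta(seconds=10),
    timeout=datetime.timedelta(seconds=2),
)

# Assumed attachment point: an online deployment's liveness/readiness probes.
deployment = models.ManagedOnlineDeployment(
    liveness_probe=probe,
    readiness_probe=probe,
)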
-class Password(_serialization.Model):
- """Password.
+class ProgressMetrics(_serialization.Model):
+ """Progress metrics definition.
Variables are only populated by the server, and will be ignored when sending a request.
- :ivar name:
- :vartype name: str
- :ivar value:
- :vartype value: str
+ :ivar completed_datapoint_count: The completed datapoint count.
+ :vartype completed_datapoint_count: int
+ :ivar incremental_data_last_refresh_date_time: The time of last successful incremental data
+ refresh in UTC.
+ :vartype incremental_data_last_refresh_date_time: ~datetime.datetime
+ :ivar skipped_datapoint_count: The skipped datapoint count.
+ :vartype skipped_datapoint_count: int
+ :ivar total_datapoint_count: The total datapoint count.
+ :vartype total_datapoint_count: int
"""
_validation = {
- "name": {"readonly": True},
- "value": {"readonly": True},
+ "completed_datapoint_count": {"readonly": True},
+ "incremental_data_last_refresh_date_time": {"readonly": True},
+ "skipped_datapoint_count": {"readonly": True},
+ "total_datapoint_count": {"readonly": True},
}
_attribute_map = {
- "name": {"key": "name", "type": "str"},
- "value": {"key": "value", "type": "str"},
+ "completed_datapoint_count": {"key": "completedDatapointCount", "type": "int"},
+ "incremental_data_last_refresh_date_time": {"key": "incrementalDataLastRefreshDateTime", "type": "iso-8601"},
+ "skipped_datapoint_count": {"key": "skippedDatapointCount", "type": "int"},
+ "total_datapoint_count": {"key": "totalDatapointCount", "type": "int"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
- self.name = None
- self.value = None
+ self.completed_datapoint_count = None
+ self.incremental_data_last_refresh_date_time = None
+ self.skipped_datapoint_count = None
+ self.total_datapoint_count = None
-class PATAuthTypeWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2):
- """PATAuthTypeWorkspaceConnectionProperties.
+class PyTorch(DistributionConfiguration):
+ """PyTorch distribution configuration.
All required parameters must be populated in order to send to Azure.
- :ivar auth_type: Authentication type of the connection target. Required. Known values are:
- "PAT", "ManagedIdentity", "UsernamePassword", "None", and "SAS".
- :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
- :ivar category: Category of the connection. Known values are: "PythonFeed",
- "ContainerRegistry", and "Git".
- :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
- :ivar target:
- :vartype target: str
- :ivar value: Value details of the workspace connection.
- :vartype value: str
- :ivar value_format: format for the workspace connection value. "JSON"
- :vartype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
- :ivar credentials:
- :vartype credentials:
- ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPersonalAccessToken
+ :ivar distribution_type: [Required] Specifies the type of distribution framework. Required.
+ Known values are: "PyTorch", "TensorFlow", "Mpi", and "Ray".
+ :vartype distribution_type: str or ~azure.mgmt.machinelearningservices.models.DistributionType
+ :ivar process_count_per_instance: Number of processes per node.
+ :vartype process_count_per_instance: int
"""
_validation = {
- "auth_type": {"required": True},
+ "distribution_type": {"required": True},
}
_attribute_map = {
- "auth_type": {"key": "authType", "type": "str"},
- "category": {"key": "category", "type": "str"},
- "target": {"key": "target", "type": "str"},
- "value": {"key": "value", "type": "str"},
- "value_format": {"key": "valueFormat", "type": "str"},
- "credentials": {"key": "credentials", "type": "WorkspaceConnectionPersonalAccessToken"},
+ "distribution_type": {"key": "distributionType", "type": "str"},
+ "process_count_per_instance": {"key": "processCountPerInstance", "type": "int"},
}
- def __init__(
- self,
- *,
- category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
- target: Optional[str] = None,
- value: Optional[str] = None,
- value_format: Optional[Union[str, "_models.ValueFormat"]] = None,
- credentials: Optional["_models.WorkspaceConnectionPersonalAccessToken"] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, process_count_per_instance: Optional[int] = None, **kwargs: Any) -> None:
"""
- :keyword category: Category of the connection. Known values are: "PythonFeed",
- "ContainerRegistry", and "Git".
- :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
- :keyword target:
- :paramtype target: str
- :keyword value: Value details of the workspace connection.
- :paramtype value: str
- :keyword value_format: format for the workspace connection value. "JSON"
- :paramtype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
- :keyword credentials:
- :paramtype credentials:
- ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPersonalAccessToken
+ :keyword process_count_per_instance: Number of processes per node.
+ :paramtype process_count_per_instance: int
"""
- super().__init__(category=category, target=target, value=value, value_format=value_format, **kwargs)
- self.auth_type: str = "PAT"
- self.credentials = credentials
-
-
-class PendingUploadCredentialDto(_serialization.Model):
- """PendingUploadCredentialDto.
+ super().__init__(**kwargs)
+ self.distribution_type: str = "PyTorch"
+ self.process_count_per_instance = process_count_per_instance
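
A brief sketch of requesting PyTorch distributed execution with two processes per node. The command, environment ID and the exact set of required CommandJob fields are placeholders/assumptions to check against the generated CommandJob model.

from azure.mgmt.machinelearningservices import models

distribution = models.PyTorch(process_count_per_instance=2)

command_job = models.CommandJob(
    command="python train.py --epochs 10",
    environment_id="<environment-arm-id-or-azureml-uri>",  # placeholder
    distribution=distribution,
    experiment_name="pytorch-ddp-demo",
)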
- You probably want to use the sub-classes and not this class directly. Known sub-classes are:
- SASCredentialDto
- All required parameters must be populated in order to send to Azure.
+class QueueSettings(_serialization.Model):
+ """QueueSettings.
- :ivar credential_type: [Required] Credential type used to authentication with storage.
- Required. "SAS"
- :vartype credential_type: str or
- ~azure.mgmt.machinelearningservices.models.PendingUploadCredentialType
+ :ivar job_tier: Controls the compute job tier. Known values are: "Null", "Spot", "Basic",
+ "Standard", and "Premium".
+ :vartype job_tier: str or ~azure.mgmt.machinelearningservices.models.JobTier
+ :ivar priority: Controls the priority of the job on a compute.
+ :vartype priority: int
"""
- _validation = {
- "credential_type": {"required": True},
- }
-
_attribute_map = {
- "credential_type": {"key": "credentialType", "type": "str"},
+ "job_tier": {"key": "jobTier", "type": "str"},
+ "priority": {"key": "priority", "type": "int"},
}
- _subtype_map = {"credential_type": {"SAS": "SASCredentialDto"}}
-
- def __init__(self, **kwargs: Any) -> None:
- """ """
+ def __init__(
+ self, *, job_tier: Optional[Union[str, "_models.JobTier"]] = None, priority: Optional[int] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword job_tier: Controls the compute job tier. Known values are: "Null", "Spot", "Basic",
+ "Standard", and "Premium".
+ :paramtype job_tier: str or ~azure.mgmt.machinelearningservices.models.JobTier
+ :keyword priority: Controls the priority of the job on a compute.
+ :paramtype priority: int
+ """
super().__init__(**kwargs)
- self.credential_type: Optional[str] = None
+ self.job_tier = job_tier
+ self.priority = priority
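
A one-liner sketch of the new model: queue a job at the Spot tier with an explicit priority. Where QueueSettings is attached (e.g. a `queue_settings` field on command/sweep jobs) is an assumption about this preview API version.

from azure.mgmt.machinelearningservices import models

# "Spot" is one of the documented JobTier values; priority controls the job's
# relative scheduling priority on the compute.
queue = models.QueueSettings(job_tier="Spot", priority=100)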
-class PendingUploadRequestDto(_serialization.Model):
- """PendingUploadRequestDto.
+class QuotaBaseProperties(_serialization.Model):
+ """The properties for Quota update or retrieval.
- :ivar pending_upload_id: If PendingUploadId = null then random guid will be used.
- :vartype pending_upload_id: str
- :ivar pending_upload_type: TemporaryBlobReference is the only supported type. Known values are:
- "None" and "TemporaryBlobReference".
- :vartype pending_upload_type: str or
- ~azure.mgmt.machinelearningservices.models.PendingUploadType
+ :ivar id: Specifies the resource ID.
+ :vartype id: str
+ :ivar type: Specifies the resource type.
+ :vartype type: str
+ :ivar limit: The maximum permitted quota of the resource.
+ :vartype limit: int
+ :ivar unit: An enum describing the unit of quota measurement. "Count"
+ :vartype unit: str or ~azure.mgmt.machinelearningservices.models.QuotaUnit
"""
_attribute_map = {
- "pending_upload_id": {"key": "pendingUploadId", "type": "str"},
- "pending_upload_type": {"key": "pendingUploadType", "type": "str"},
+ "id": {"key": "id", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "limit": {"key": "limit", "type": "int"},
+ "unit": {"key": "unit", "type": "str"},
}
def __init__(
self,
*,
- pending_upload_id: Optional[str] = None,
- pending_upload_type: Optional[Union[str, "_models.PendingUploadType"]] = None,
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
+ type: Optional[str] = None,
+ limit: Optional[int] = None,
+ unit: Optional[Union[str, "_models.QuotaUnit"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword pending_upload_id: If PendingUploadId = null then random guid will be used.
- :paramtype pending_upload_id: str
- :keyword pending_upload_type: TemporaryBlobReference is the only supported type. Known values
- are: "None" and "TemporaryBlobReference".
- :paramtype pending_upload_type: str or
- ~azure.mgmt.machinelearningservices.models.PendingUploadType
+ :keyword id: Specifies the resource ID.
+ :paramtype id: str
+ :keyword type: Specifies the resource type.
+ :paramtype type: str
+ :keyword limit: The maximum permitted quota of the resource.
+ :paramtype limit: int
+ :keyword unit: An enum describing the unit of quota measurement. "Count"
+ :paramtype unit: str or ~azure.mgmt.machinelearningservices.models.QuotaUnit
"""
super().__init__(**kwargs)
- self.pending_upload_id = pending_upload_id
- self.pending_upload_type = pending_upload_type
+ self.id = id
+ self.type = type
+ self.limit = limit
+ self.unit = unit
-class PendingUploadResponseDto(_serialization.Model):
- """PendingUploadResponseDto.
+class QuotaUpdateParameters(_serialization.Model):
+ """Quota update parameters.
- :ivar blob_reference_for_consumption: Container level read, write, list SAS.
- :vartype blob_reference_for_consumption:
- ~azure.mgmt.machinelearningservices.models.BlobReferenceForConsumptionDto
- :ivar pending_upload_id: ID for this upload request.
- :vartype pending_upload_id: str
- :ivar pending_upload_type: TemporaryBlobReference is the only supported type. Known values are:
- "None" and "TemporaryBlobReference".
- :vartype pending_upload_type: str or
- ~azure.mgmt.machinelearningservices.models.PendingUploadType
+ :ivar value: The list for update quota.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.QuotaBaseProperties]
+ :ivar location: Region of workspace quota to be updated.
+ :vartype location: str
"""
_attribute_map = {
- "blob_reference_for_consumption": {
- "key": "blobReferenceForConsumption",
- "type": "BlobReferenceForConsumptionDto",
- },
- "pending_upload_id": {"key": "pendingUploadId", "type": "str"},
- "pending_upload_type": {"key": "pendingUploadType", "type": "str"},
+ "value": {"key": "value", "type": "[QuotaBaseProperties]"},
+ "location": {"key": "location", "type": "str"},
}
def __init__(
self,
*,
- blob_reference_for_consumption: Optional["_models.BlobReferenceForConsumptionDto"] = None,
- pending_upload_id: Optional[str] = None,
- pending_upload_type: Optional[Union[str, "_models.PendingUploadType"]] = None,
+ value: Optional[List["_models.QuotaBaseProperties"]] = None,
+ location: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword blob_reference_for_consumption: Container level read, write, list SAS.
- :paramtype blob_reference_for_consumption:
- ~azure.mgmt.machinelearningservices.models.BlobReferenceForConsumptionDto
- :keyword pending_upload_id: ID for this upload request.
- :paramtype pending_upload_id: str
- :keyword pending_upload_type: TemporaryBlobReference is the only supported type. Known values
- are: "None" and "TemporaryBlobReference".
- :paramtype pending_upload_type: str or
- ~azure.mgmt.machinelearningservices.models.PendingUploadType
+ :keyword value: The list for update quota.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.QuotaBaseProperties]
+ :keyword location: Region of workspace quota to be updated.
+ :paramtype location: str
"""
super().__init__(**kwargs)
- self.blob_reference_for_consumption = blob_reference_for_consumption
- self.pending_upload_id = pending_upload_id
- self.pending_upload_type = pending_upload_type
+ self.value = value
+ self.location = location
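
An illustrative sketch (not from this diff) of raising the quota for one VM family in a region. The quota resource ARM ID is a placeholder, and the `quotas.update` call reflects my understanding of the quotas operation group in this package.

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient, models

client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")

update = models.QuotaUpdateParameters(
    location="eastus",
    value=[
        models.QuotaBaseProperties(
            id="<quota-resource-arm-id>",  # placeholder for the VM-family quota resource
            limit=48,
            unit="Count",
        )
    ],
)
result = client.quotas.update("eastus", update)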
-class PersonalComputeInstanceSettings(_serialization.Model):
- """Settings for a personal compute instance.
+class RandomSamplingAlgorithm(SamplingAlgorithm):
+ """Defines a Sampling Algorithm that generates values randomly.
- :ivar assigned_user: A user explicitly assigned to a personal compute instance.
- :vartype assigned_user: ~azure.mgmt.machinelearningservices.models.AssignedUser
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar sampling_algorithm_type: [Required] The algorithm used for generating hyperparameter
+ values, along with configuration properties. Required. Known values are: "Grid", "Random", and
+ "Bayesian".
+ :vartype sampling_algorithm_type: str or
+ ~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
+ :ivar logbase: An optional positive number or e in string format to be used as base for log
+ based random sampling.
+ :vartype logbase: str
+ :ivar rule: The specific type of random algorithm. Known values are: "Random" and "Sobol".
+ :vartype rule: str or ~azure.mgmt.machinelearningservices.models.RandomSamplingAlgorithmRule
+ :ivar seed: An optional integer to use as the seed for random number generation.
+ :vartype seed: int
"""
+ _validation = {
+ "sampling_algorithm_type": {"required": True},
+ }
+
_attribute_map = {
- "assigned_user": {"key": "assignedUser", "type": "AssignedUser"},
+ "sampling_algorithm_type": {"key": "samplingAlgorithmType", "type": "str"},
+ "logbase": {"key": "logbase", "type": "str"},
+ "rule": {"key": "rule", "type": "str"},
+ "seed": {"key": "seed", "type": "int"},
}
- def __init__(self, *, assigned_user: Optional["_models.AssignedUser"] = None, **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ logbase: Optional[str] = None,
+ rule: Optional[Union[str, "_models.RandomSamplingAlgorithmRule"]] = None,
+ seed: Optional[int] = None,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword assigned_user: A user explicitly assigned to a personal compute instance.
- :paramtype assigned_user: ~azure.mgmt.machinelearningservices.models.AssignedUser
+ :keyword logbase: An optional positive number or e in string format to be used as base for log
+ based random sampling.
+ :paramtype logbase: str
+ :keyword rule: The specific type of random algorithm. Known values are: "Random" and "Sobol".
+ :paramtype rule: str or ~azure.mgmt.machinelearningservices.models.RandomSamplingAlgorithmRule
+ :keyword seed: An optional integer to use as the seed for random number generation.
+ :paramtype seed: int
"""
super().__init__(**kwargs)
- self.assigned_user = assigned_user
-
+ self.sampling_algorithm_type: str = "Random"
+ self.logbase = logbase
+ self.rule = rule
+ self.seed = seed
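
A short sketch of a seeded Sobol random sampler; this object is what a sweep job consumes as its sampling algorithm (the surrounding SweepJob fields are omitted here, and that attachment point is stated as an assumption rather than something this diff shows).

from azure.mgmt.machinelearningservices import models

sampling = models.RandomSamplingAlgorithm(rule="Sobol", seed=42)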
-class PipelineJob(JobBaseProperties): # pylint: disable=too-many-instance-attributes
- """Pipeline Job definition: defines generic to MFE attributes.
- Variables are only populated by the server, and will be ignored when sending a request.
+class Ray(DistributionConfiguration):
+ """Ray distribution configuration.
All required parameters must be populated in order to send to Azure.
- :ivar description: The asset description text.
- :vartype description: str
- :ivar properties: The asset property dictionary.
- :vartype properties: dict[str, str]
- :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
- :vartype tags: dict[str, str]
- :ivar component_id: ARM resource ID of the component resource.
- :vartype component_id: str
- :ivar compute_id: ARM resource ID of the compute resource.
- :vartype compute_id: str
- :ivar display_name: Display name of job.
- :vartype display_name: str
- :ivar experiment_name: The name of the experiment the job belongs to. If not set, the job is
- placed in the "Default" experiment.
- :vartype experiment_name: str
- :ivar identity: Identity configuration. If set, this should be one of AmlToken,
- ManagedIdentity, UserIdentity or null.
- Defaults to AmlToken if null.
- :vartype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
- :ivar is_archived: Is the asset archived?.
- :vartype is_archived: bool
- :ivar job_type: [Required] Specifies the type of job. Required. Known values are: "AutoML",
- "Command", "Sweep", and "Pipeline".
- :vartype job_type: str or ~azure.mgmt.machinelearningservices.models.JobType
- :ivar services: List of JobEndpoints.
- For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
- :vartype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
- :ivar status: Status of the job. Known values are: "NotStarted", "Starting", "Provisioning",
- "Preparing", "Queued", "Running", "Finalizing", "CancelRequested", "Completed", "Failed",
- "Canceled", "NotResponding", "Paused", and "Unknown".
- :vartype status: str or ~azure.mgmt.machinelearningservices.models.JobStatus
- :ivar inputs: Inputs for the pipeline job.
- :vartype inputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
- :ivar jobs: Jobs construct the Pipeline Job.
- :vartype jobs: dict[str, JSON]
- :ivar outputs: Outputs for the pipeline job.
- :vartype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
- :ivar settings: Pipeline settings, for things like ContinueRunOnStepFailure etc.
- :vartype settings: JSON
- :ivar source_job_id: ARM resource ID of source job.
- :vartype source_job_id: str
+ :ivar distribution_type: [Required] Specifies the type of distribution framework. Required.
+ Known values are: "PyTorch", "TensorFlow", "Mpi", and "Ray".
+ :vartype distribution_type: str or ~azure.mgmt.machinelearningservices.models.DistributionType
+ :ivar address: The address of the Ray head node.
+ :vartype address: str
+ :ivar dashboard_port: The port to bind the dashboard server to.
+ :vartype dashboard_port: int
+ :ivar head_node_additional_args: Additional arguments passed to ray start on the head node.
+ :vartype head_node_additional_args: str
+ :ivar include_dashboard: Provide this argument to start the Ray dashboard GUI.
+ :vartype include_dashboard: bool
+ :ivar port: The port of the head ray process.
+ :vartype port: int
+ :ivar worker_node_additional_args: Additional arguments passed to ray start on worker nodes.
+ :vartype worker_node_additional_args: str
"""
_validation = {
- "job_type": {"required": True},
- "status": {"readonly": True},
+ "distribution_type": {"required": True},
}
_attribute_map = {
- "description": {"key": "description", "type": "str"},
- "properties": {"key": "properties", "type": "{str}"},
- "tags": {"key": "tags", "type": "{str}"},
- "component_id": {"key": "componentId", "type": "str"},
- "compute_id": {"key": "computeId", "type": "str"},
- "display_name": {"key": "displayName", "type": "str"},
- "experiment_name": {"key": "experimentName", "type": "str"},
- "identity": {"key": "identity", "type": "IdentityConfiguration"},
- "is_archived": {"key": "isArchived", "type": "bool"},
- "job_type": {"key": "jobType", "type": "str"},
- "services": {"key": "services", "type": "{JobService}"},
- "status": {"key": "status", "type": "str"},
- "inputs": {"key": "inputs", "type": "{JobInput}"},
- "jobs": {"key": "jobs", "type": "{object}"},
- "outputs": {"key": "outputs", "type": "{JobOutput}"},
- "settings": {"key": "settings", "type": "object"},
- "source_job_id": {"key": "sourceJobId", "type": "str"},
+ "distribution_type": {"key": "distributionType", "type": "str"},
+ "address": {"key": "address", "type": "str"},
+ "dashboard_port": {"key": "dashboardPort", "type": "int"},
+ "head_node_additional_args": {"key": "headNodeAdditionalArgs", "type": "str"},
+ "include_dashboard": {"key": "includeDashboard", "type": "bool"},
+ "port": {"key": "port", "type": "int"},
+ "worker_node_additional_args": {"key": "workerNodeAdditionalArgs", "type": "str"},
}
def __init__(
self,
*,
- description: Optional[str] = None,
- properties: Optional[Dict[str, str]] = None,
- tags: Optional[Dict[str, str]] = None,
- component_id: Optional[str] = None,
- compute_id: Optional[str] = None,
- display_name: Optional[str] = None,
- experiment_name: str = "Default",
- identity: Optional["_models.IdentityConfiguration"] = None,
- is_archived: bool = False,
- services: Optional[Dict[str, "_models.JobService"]] = None,
- inputs: Optional[Dict[str, "_models.JobInput"]] = None,
- jobs: Optional[Dict[str, JSON]] = None,
- outputs: Optional[Dict[str, "_models.JobOutput"]] = None,
- settings: Optional[JSON] = None,
- source_job_id: Optional[str] = None,
+ address: Optional[str] = None,
+ dashboard_port: Optional[int] = None,
+ head_node_additional_args: Optional[str] = None,
+ include_dashboard: Optional[bool] = None,
+ port: Optional[int] = None,
+ worker_node_additional_args: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword description: The asset description text.
- :paramtype description: str
- :keyword properties: The asset property dictionary.
- :paramtype properties: dict[str, str]
- :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
- :paramtype tags: dict[str, str]
- :keyword component_id: ARM resource ID of the component resource.
- :paramtype component_id: str
- :keyword compute_id: ARM resource ID of the compute resource.
- :paramtype compute_id: str
- :keyword display_name: Display name of job.
- :paramtype display_name: str
- :keyword experiment_name: The name of the experiment the job belongs to. If not set, the job is
- placed in the "Default" experiment.
- :paramtype experiment_name: str
- :keyword identity: Identity configuration. If set, this should be one of AmlToken,
- ManagedIdentity, UserIdentity or null.
- Defaults to AmlToken if null.
- :paramtype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
- :keyword is_archived: Is the asset archived?.
- :paramtype is_archived: bool
- :keyword services: List of JobEndpoints.
- For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
- :paramtype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
- :keyword inputs: Inputs for the pipeline job.
- :paramtype inputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
- :keyword jobs: Jobs construct the Pipeline Job.
- :paramtype jobs: dict[str, JSON]
- :keyword outputs: Outputs for the pipeline job.
- :paramtype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
- :keyword settings: Pipeline settings, for things like ContinueRunOnStepFailure etc.
- :paramtype settings: JSON
- :keyword source_job_id: ARM resource ID of source job.
- :paramtype source_job_id: str
+ :keyword address: The address of the Ray head node.
+ :paramtype address: str
+ :keyword dashboard_port: The port to bind the dashboard server to.
+ :paramtype dashboard_port: int
+ :keyword head_node_additional_args: Additional arguments passed to ray start on the head node.
+ :paramtype head_node_additional_args: str
+ :keyword include_dashboard: Provide this argument to start the Ray dashboard GUI.
+ :paramtype include_dashboard: bool
+ :keyword port: The port of the head ray process.
+ :paramtype port: int
+ :keyword worker_node_additional_args: Additional arguments passed to ray start on worker nodes.
+ :paramtype worker_node_additional_args: str
"""
- super().__init__(
- description=description,
- properties=properties,
- tags=tags,
- component_id=component_id,
- compute_id=compute_id,
- display_name=display_name,
- experiment_name=experiment_name,
- identity=identity,
- is_archived=is_archived,
- services=services,
- **kwargs
- )
- self.job_type: str = "Pipeline"
- self.inputs = inputs
- self.jobs = jobs
- self.outputs = outputs
- self.settings = settings
- self.source_job_id = source_job_id
-
-
-class PrivateEndpoint(_serialization.Model):
- """The Private Endpoint resource.
+ super().__init__(**kwargs)
+ self.distribution_type: str = "Ray"
+ self.address = address
+ self.dashboard_port = dashboard_port
+ self.head_node_additional_args = head_node_additional_args
+ self.include_dashboard = include_dashboard
+ self.port = port
+ self.worker_node_additional_args = worker_node_additional_args
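A hedged illustration of the new Ray distribution model; the address, ports, and extra arguments below are assumptions made for the sketch, not values required by the service:

    from azure.mgmt.machinelearningservices import models

    ray_distribution = models.Ray(
        address="10.0.0.4",          # head node address (illustrative)
        port=6379,                   # head Ray process port (illustrative)
        include_dashboard=True,
        dashboard_port=8265,
        head_node_additional_args="--object-store-memory 100000000",
        worker_node_additional_args="--num-cpus 4",
    )
    # distribution_type is fixed to "Ray" by the model's constructor.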
- Variables are only populated by the server, and will be ignored when sending a request.
- :ivar id: The ARM identifier for Private Endpoint.
- :vartype id: str
- """
+class Recurrence(_serialization.Model):
+ """The workflow trigger recurrence for ComputeStartStop schedule type.
- _validation = {
- "id": {"readonly": True},
- }
+ :ivar frequency: [Required] The frequency to trigger schedule. Known values are: "Minute",
+ "Hour", "Day", "Week", and "Month".
+ :vartype frequency: str or
+ ~azure.mgmt.machinelearningservices.models.ComputeRecurrenceFrequency
+ :ivar interval: [Required] Specifies schedule interval in conjunction with frequency.
+ :vartype interval: int
+ :ivar start_time: The start time in yyyy-MM-ddTHH:mm:ss format.
+ :vartype start_time: str
+ :ivar time_zone: Specifies time zone in which the schedule runs.
+ TimeZone should follow Windows time zone format. Refer:
+ https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
+ :vartype time_zone: str
+ :ivar schedule: [Required] The recurrence schedule.
+ :vartype schedule: ~azure.mgmt.machinelearningservices.models.ComputeRecurrenceSchedule
+ """
_attribute_map = {
- "id": {"key": "id", "type": "str"},
+ "frequency": {"key": "frequency", "type": "str"},
+ "interval": {"key": "interval", "type": "int"},
+ "start_time": {"key": "startTime", "type": "str"},
+ "time_zone": {"key": "timeZone", "type": "str"},
+ "schedule": {"key": "schedule", "type": "ComputeRecurrenceSchedule"},
}
- def __init__(self, **kwargs: Any) -> None:
- """ """
+ def __init__(
+ self,
+ *,
+ frequency: Optional[Union[str, "_models.ComputeRecurrenceFrequency"]] = None,
+ interval: Optional[int] = None,
+ start_time: Optional[str] = None,
+ time_zone: str = "UTC",
+ schedule: Optional["_models.ComputeRecurrenceSchedule"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword frequency: [Required] The frequency to trigger schedule. Known values are: "Minute",
+ "Hour", "Day", "Week", and "Month".
+ :paramtype frequency: str or
+ ~azure.mgmt.machinelearningservices.models.ComputeRecurrenceFrequency
+ :keyword interval: [Required] Specifies schedule interval in conjunction with frequency.
+ :paramtype interval: int
+ :keyword start_time: The start time in yyyy-MM-ddTHH:mm:ss format.
+ :paramtype start_time: str
+ :keyword time_zone: Specifies time zone in which the schedule runs.
+ TimeZone should follow Windows time zone format. Refer:
+ https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
+ :paramtype time_zone: str
+ :keyword schedule: [Required] The recurrence schedule.
+ :paramtype schedule: ~azure.mgmt.machinelearningservices.models.ComputeRecurrenceSchedule
+ """
super().__init__(**kwargs)
- self.id = None
+ self.frequency = frequency
+ self.interval = interval
+ self.start_time = start_time
+ self.time_zone = time_zone
+ self.schedule = schedule
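A short sketch of the ComputeStartStop recurrence, assuming ComputeRecurrenceSchedule accepts the same hours/minutes keywords as the RecurrenceSchedule model defined below; the times are illustrative:

    from azure.mgmt.machinelearningservices import models

    stop_recurrence = models.Recurrence(
        frequency="Day",
        interval=1,
        start_time="2023-09-01T00:00:00",
        time_zone="UTC",  # Windows time zone format; "UTC" is the default
        schedule=models.ComputeRecurrenceSchedule(hours=[22], minutes=[0]),  # assumed signature
    )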
-class PrivateEndpointConnection(Resource): # pylint: disable=too-many-instance-attributes
- """The Private Endpoint Connection resource.
+class RecurrenceSchedule(_serialization.Model):
+ """RecurrenceSchedule.
- Variables are only populated by the server, and will be ignored when sending a request.
+ All required parameters must be populated in order to send to Azure.
- :ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
- :vartype id: str
- :ivar name: The name of the resource.
- :vartype name: str
- :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
- "Microsoft.Storage/storageAccounts".
- :vartype type: str
- :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
- information.
- :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
- :ivar identity: The identity of the resource.
- :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
- :ivar location: Specifies the location of the resource.
- :vartype location: str
- :ivar tags: Contains resource tags defined as key/value pairs.
- :vartype tags: dict[str, str]
- :ivar sku: The sku of the workspace.
- :vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
- :ivar private_endpoint: The resource of private end point.
- :vartype private_endpoint: ~azure.mgmt.machinelearningservices.models.PrivateEndpoint
- :ivar private_link_service_connection_state: A collection of information about the state of the
- connection between service consumer and provider.
- :vartype private_link_service_connection_state:
- ~azure.mgmt.machinelearningservices.models.PrivateLinkServiceConnectionState
- :ivar provisioning_state: The provisioning state of the private endpoint connection resource.
- Known values are: "Succeeded", "Creating", "Deleting", and "Failed".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.PrivateEndpointConnectionProvisioningState
+ :ivar hours: [Required] List of hours for the schedule. Required.
+ :vartype hours: list[int]
+ :ivar minutes: [Required] List of minutes for the schedule. Required.
+ :vartype minutes: list[int]
+ :ivar month_days: List of month days for the schedule.
+ :vartype month_days: list[int]
+ :ivar week_days: List of days for the schedule.
+ :vartype week_days: list[str or ~azure.mgmt.machinelearningservices.models.WeekDay]
"""
_validation = {
- "id": {"readonly": True},
- "name": {"readonly": True},
- "type": {"readonly": True},
- "system_data": {"readonly": True},
- "provisioning_state": {"readonly": True},
+ "hours": {"required": True},
+ "minutes": {"required": True},
}
_attribute_map = {
- "id": {"key": "id", "type": "str"},
- "name": {"key": "name", "type": "str"},
- "type": {"key": "type", "type": "str"},
- "system_data": {"key": "systemData", "type": "SystemData"},
- "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
- "location": {"key": "location", "type": "str"},
- "tags": {"key": "tags", "type": "{str}"},
- "sku": {"key": "sku", "type": "Sku"},
- "private_endpoint": {"key": "properties.privateEndpoint", "type": "PrivateEndpoint"},
- "private_link_service_connection_state": {
- "key": "properties.privateLinkServiceConnectionState",
- "type": "PrivateLinkServiceConnectionState",
- },
- "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
+ "hours": {"key": "hours", "type": "[int]"},
+ "minutes": {"key": "minutes", "type": "[int]"},
+ "month_days": {"key": "monthDays", "type": "[int]"},
+ "week_days": {"key": "weekDays", "type": "[str]"},
}
def __init__(
self,
*,
- identity: Optional["_models.ManagedServiceIdentity"] = None,
- location: Optional[str] = None,
- tags: Optional[Dict[str, str]] = None,
- sku: Optional["_models.Sku"] = None,
- private_endpoint: Optional["_models.PrivateEndpoint"] = None,
- private_link_service_connection_state: Optional["_models.PrivateLinkServiceConnectionState"] = None,
+ hours: List[int],
+ minutes: List[int],
+ month_days: Optional[List[int]] = None,
+ week_days: Optional[List[Union[str, "_models.WeekDay"]]] = None,
**kwargs: Any
) -> None:
"""
- :keyword identity: The identity of the resource.
- :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
- :keyword location: Specifies the location of the resource.
- :paramtype location: str
- :keyword tags: Contains resource tags defined as key/value pairs.
- :paramtype tags: dict[str, str]
- :keyword sku: The sku of the workspace.
- :paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
- :keyword private_endpoint: The resource of private end point.
- :paramtype private_endpoint: ~azure.mgmt.machinelearningservices.models.PrivateEndpoint
- :keyword private_link_service_connection_state: A collection of information about the state of
- the connection between service consumer and provider.
- :paramtype private_link_service_connection_state:
- ~azure.mgmt.machinelearningservices.models.PrivateLinkServiceConnectionState
+ :keyword hours: [Required] List of hours for the schedule. Required.
+ :paramtype hours: list[int]
+ :keyword minutes: [Required] List of minutes for the schedule. Required.
+ :paramtype minutes: list[int]
+ :keyword month_days: List of month days for the schedule.
+ :paramtype month_days: list[int]
+ :keyword week_days: List of days for the schedule.
+ :paramtype week_days: list[str or ~azure.mgmt.machinelearningservices.models.WeekDay]
"""
super().__init__(**kwargs)
- self.identity = identity
- self.location = location
- self.tags = tags
- self.sku = sku
- self.private_endpoint = private_endpoint
- self.private_link_service_connection_state = private_link_service_connection_state
- self.provisioning_state = None
+ self.hours = hours
+ self.minutes = minutes
+ self.month_days = month_days
+ self.week_days = week_days
-class PrivateEndpointConnectionListResult(_serialization.Model):
- """List of private endpoint connection associated with the specified workspace.
+class RecurrenceTrigger(TriggerBase):
+ """RecurrenceTrigger.
- :ivar value: Array of private endpoint connections.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection]
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar end_time: Specifies end time of schedule in ISO 8601, but without a UTC offset. Refer
+ https://en.wikipedia.org/wiki/ISO_8601.
+ The recommended format is "2022-06-01T00:00:01".
+ If not present, the schedule will run indefinitely.
+ :vartype end_time: str
+ :ivar start_time: Specifies start time of schedule in ISO 8601 format, but without a UTC
+ offset.
+ :vartype start_time: str
+ :ivar time_zone: Specifies time zone in which the schedule runs.
+ TimeZone should follow Windows time zone format. Refer:
+ https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
+ :vartype time_zone: str
+ :ivar trigger_type: [Required]. Required. Known values are: "Recurrence" and "Cron".
+ :vartype trigger_type: str or ~azure.mgmt.machinelearningservices.models.TriggerType
+ :ivar frequency: [Required] The frequency to trigger schedule. Required. Known values are:
+ "Minute", "Hour", "Day", "Week", and "Month".
+ :vartype frequency: str or ~azure.mgmt.machinelearningservices.models.RecurrenceFrequency
+ :ivar interval: [Required] Specifies schedule interval in conjunction with frequency. Required.
+ :vartype interval: int
+ :ivar schedule: The recurrence schedule.
+ :vartype schedule: ~azure.mgmt.machinelearningservices.models.RecurrenceSchedule
"""
+ _validation = {
+ "trigger_type": {"required": True},
+ "frequency": {"required": True},
+ "interval": {"required": True},
+ }
+
_attribute_map = {
- "value": {"key": "value", "type": "[PrivateEndpointConnection]"},
+ "end_time": {"key": "endTime", "type": "str"},
+ "start_time": {"key": "startTime", "type": "str"},
+ "time_zone": {"key": "timeZone", "type": "str"},
+ "trigger_type": {"key": "triggerType", "type": "str"},
+ "frequency": {"key": "frequency", "type": "str"},
+ "interval": {"key": "interval", "type": "int"},
+ "schedule": {"key": "schedule", "type": "RecurrenceSchedule"},
}
- def __init__(self, *, value: Optional[List["_models.PrivateEndpointConnection"]] = None, **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ frequency: Union[str, "_models.RecurrenceFrequency"],
+ interval: int,
+ end_time: Optional[str] = None,
+ start_time: Optional[str] = None,
+ time_zone: str = "UTC",
+ schedule: Optional["_models.RecurrenceSchedule"] = None,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword value: Array of private endpoint connections.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection]
+ :keyword end_time: Specifies end time of schedule in ISO 8601, but without a UTC offset. Refer
+ https://en.wikipedia.org/wiki/ISO_8601.
+ The recommended format is "2022-06-01T00:00:01".
+ If not present, the schedule will run indefinitely.
+ :paramtype end_time: str
+ :keyword start_time: Specifies start time of schedule in ISO 8601 format, but without a UTC
+ offset.
+ :paramtype start_time: str
+ :keyword time_zone: Specifies time zone in which the schedule runs.
+ TimeZone should follow Windows time zone format. Refer:
+ https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
+ :paramtype time_zone: str
+ :keyword frequency: [Required] The frequency to trigger schedule. Required. Known values are:
+ "Minute", "Hour", "Day", "Week", and "Month".
+ :paramtype frequency: str or ~azure.mgmt.machinelearningservices.models.RecurrenceFrequency
+ :keyword interval: [Required] Specifies schedule interval in conjunction with frequency.
+ Required.
+ :paramtype interval: int
+ :keyword schedule: The recurrence schedule.
+ :paramtype schedule: ~azure.mgmt.machinelearningservices.models.RecurrenceSchedule
"""
- super().__init__(**kwargs)
- self.value = value
+ super().__init__(end_time=end_time, start_time=start_time, time_zone=time_zone, **kwargs)
+ self.trigger_type: str = "Recurrence"
+ self.frequency = frequency
+ self.interval = interval
+ self.schedule = schedule
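A minimal sketch combining the two models above into a weekly trigger; the specific day and time are illustrative:

    from azure.mgmt.machinelearningservices import models

    trigger = models.RecurrenceTrigger(
        frequency="Week",
        interval=1,
        schedule=models.RecurrenceSchedule(hours=[9], minutes=[30], week_days=["Monday"]),
        start_time="2023-09-04T00:00:00",
        time_zone="UTC",
    )
    # trigger_type is set to "Recurrence" by the constructor.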
-class PrivateEndpointResource(PrivateEndpoint):
- """The PE network resource that is linked to this PE connection.
+class RegenerateEndpointKeysRequest(_serialization.Model):
+ """RegenerateEndpointKeysRequest.
- Variables are only populated by the server, and will be ignored when sending a request.
+ All required parameters must be populated in order to send to Azure.
- :ivar id: The ARM identifier for Private Endpoint.
- :vartype id: str
- :ivar subnet_arm_id: The subnetId that the private endpoint is connected to.
- :vartype subnet_arm_id: str
+ :ivar key_type: [Required] Specification for which type of key to generate. Primary or
+ Secondary. Required. Known values are: "Primary" and "Secondary".
+ :vartype key_type: str or ~azure.mgmt.machinelearningservices.models.KeyType
+ :ivar key_value: The value the key is set to.
+ :vartype key_value: str
"""
_validation = {
- "id": {"readonly": True},
+ "key_type": {"required": True},
}
_attribute_map = {
- "id": {"key": "id", "type": "str"},
- "subnet_arm_id": {"key": "subnetArmId", "type": "str"},
+ "key_type": {"key": "keyType", "type": "str"},
+ "key_value": {"key": "keyValue", "type": "str"},
}
- def __init__(self, *, subnet_arm_id: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(
+ self, *, key_type: Union[str, "_models.KeyType"], key_value: Optional[str] = None, **kwargs: Any
+ ) -> None:
"""
- :keyword subnet_arm_id: The subnetId that the private endpoint is connected to.
- :paramtype subnet_arm_id: str
+ :keyword key_type: [Required] Specification for which type of key to generate. Primary or
+ Secondary. Required. Known values are: "Primary" and "Secondary".
+ :paramtype key_type: str or ~azure.mgmt.machinelearningservices.models.KeyType
+ :keyword key_value: The value the key is set to.
+ :paramtype key_value: str
"""
super().__init__(**kwargs)
- self.subnet_arm_id = subnet_arm_id
+ self.key_type = key_type
+ self.key_value = key_value
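The request body is built from this model; the operations-group call shown below is an assumption about the generated client surface and should be verified against the operations classes in this package:

    from azure.identity import DefaultAzureCredential
    from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient, models

    client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")
    body = models.RegenerateEndpointKeysRequest(key_type="Primary")
    # Assumed long-running operation on the online endpoints operations group.
    client.online_endpoints.begin_regenerate_keys(
        "<resource-group>", "<workspace-name>", "<endpoint-name>", body
    ).result()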
-class PrivateLinkResource(Resource): # pylint: disable=too-many-instance-attributes
- """A private link resource.
+class Registry(TrackedResource): # pylint: disable=too-many-instance-attributes
+ """Registry.
Variables are only populated by the server, and will be ignored when sending a request.
+ All required parameters must be populated in order to send to Azure.
+
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
@@ -17062,20 +26331,36 @@ class PrivateLinkResource(Resource): # pylint: disable=too-many-instance-attrib
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
- :ivar identity: The identity of the resource.
- :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
- :ivar location: Specifies the location of the resource.
- :vartype location: str
- :ivar tags: Contains resource tags defined as key/value pairs.
+ :ivar tags: Resource tags.
:vartype tags: dict[str, str]
- :ivar sku: The sku of the workspace.
+ :ivar location: The geo-location where the resource lives. Required.
+ :vartype location: str
+ :ivar identity: Managed service identity (system assigned and/or user assigned identities).
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :ivar kind: Metadata used by portal/tooling/etc to render different UX experiences for
+ resources of the same type.
+ :vartype kind: str
+ :ivar sku: Sku details required for ARM contract for Autoscaling.
:vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
- :ivar group_id: The private link resource group id.
- :vartype group_id: str
- :ivar required_members: The private link resource required member names.
- :vartype required_members: list[str]
- :ivar required_zone_names: The private link resource Private link DNS zone name.
- :vartype required_zone_names: list[str]
+ :ivar discovery_url: Discovery URL for the Registry.
+ :vartype discovery_url: str
+ :ivar intellectual_property_publisher: IntellectualPropertyPublisher for the registry.
+ :vartype intellectual_property_publisher: str
+ :ivar managed_resource_group: ResourceId of the managed RG if the registry has system created
+ resources.
+ :vartype managed_resource_group: ~azure.mgmt.machinelearningservices.models.ArmResourceId
+ :ivar ml_flow_registry_uri: MLFlow Registry URI for the Registry.
+ :vartype ml_flow_registry_uri: str
+ :ivar registry_private_endpoint_connections: Private endpoint connections info used for pending
+ connections in private link portal.
+ :vartype registry_private_endpoint_connections:
+ list[~azure.mgmt.machinelearningservices.models.RegistryPrivateEndpointConnection]
+ :ivar public_network_access: Is the Registry accessible from the internet?
+ Possible values: "Enabled" or "Disabled".
+ :vartype public_network_access: str
+ :ivar region_details: Details of each region the registry is in.
+ :vartype region_details:
+ list[~azure.mgmt.machinelearningservices.models.RegistryRegionArmDetails]
"""
_validation = {
@@ -17083,8 +26368,7 @@ class PrivateLinkResource(Resource): # pylint: disable=too-many-instance-attrib
"name": {"readonly": True},
"type": {"readonly": True},
"system_data": {"readonly": True},
- "group_id": {"readonly": True},
- "required_members": {"readonly": True},
+ "location": {"required": True},
}
_attribute_map = {
@@ -17092,1613 +26376,1566 @@ class PrivateLinkResource(Resource): # pylint: disable=too-many-instance-attrib
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
- "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
- "location": {"key": "location", "type": "str"},
"tags": {"key": "tags", "type": "{str}"},
+ "location": {"key": "location", "type": "str"},
+ "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
+ "kind": {"key": "kind", "type": "str"},
"sku": {"key": "sku", "type": "Sku"},
- "group_id": {"key": "properties.groupId", "type": "str"},
- "required_members": {"key": "properties.requiredMembers", "type": "[str]"},
- "required_zone_names": {"key": "properties.requiredZoneNames", "type": "[str]"},
+ "discovery_url": {"key": "properties.discoveryUrl", "type": "str"},
+ "intellectual_property_publisher": {"key": "properties.intellectualPropertyPublisher", "type": "str"},
+ "managed_resource_group": {"key": "properties.managedResourceGroup", "type": "ArmResourceId"},
+ "ml_flow_registry_uri": {"key": "properties.mlFlowRegistryUri", "type": "str"},
+ "registry_private_endpoint_connections": {
+ "key": "properties.registryPrivateEndpointConnections",
+ "type": "[RegistryPrivateEndpointConnection]",
+ },
+ "public_network_access": {"key": "properties.publicNetworkAccess", "type": "str"},
+ "region_details": {"key": "properties.regionDetails", "type": "[RegistryRegionArmDetails]"},
}
def __init__(
self,
*,
- identity: Optional["_models.ManagedServiceIdentity"] = None,
- location: Optional[str] = None,
+ location: str,
tags: Optional[Dict[str, str]] = None,
+ identity: Optional["_models.ManagedServiceIdentity"] = None,
+ kind: Optional[str] = None,
sku: Optional["_models.Sku"] = None,
- required_zone_names: Optional[List[str]] = None,
+ discovery_url: Optional[str] = None,
+ intellectual_property_publisher: Optional[str] = None,
+ managed_resource_group: Optional["_models.ArmResourceId"] = None,
+ ml_flow_registry_uri: Optional[str] = None,
+ registry_private_endpoint_connections: Optional[List["_models.RegistryPrivateEndpointConnection"]] = None,
+ public_network_access: Optional[str] = None,
+ region_details: Optional[List["_models.RegistryRegionArmDetails"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword identity: The identity of the resource.
- :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
- :keyword location: Specifies the location of the resource.
- :paramtype location: str
- :keyword tags: Contains resource tags defined as key/value pairs.
+ :keyword tags: Resource tags.
:paramtype tags: dict[str, str]
- :keyword sku: The sku of the workspace.
+ :keyword location: The geo-location where the resource lives. Required.
+ :paramtype location: str
+ :keyword identity: Managed service identity (system assigned and/or user assigned identities).
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :keyword kind: Metadata used by portal/tooling/etc to render different UX experiences for
+ resources of the same type.
+ :paramtype kind: str
+ :keyword sku: Sku details required for ARM contract for Autoscaling.
:paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
- :keyword required_zone_names: The private link resource Private link DNS zone name.
- :paramtype required_zone_names: list[str]
+ :keyword discovery_url: Discovery URL for the Registry.
+ :paramtype discovery_url: str
+ :keyword intellectual_property_publisher: IntellectualPropertyPublisher for the registry.
+ :paramtype intellectual_property_publisher: str
+ :keyword managed_resource_group: ResourceId of the managed RG if the registry has system
+ created resources.
+ :paramtype managed_resource_group: ~azure.mgmt.machinelearningservices.models.ArmResourceId
+ :keyword ml_flow_registry_uri: MLFlow Registry URI for the Registry.
+ :paramtype ml_flow_registry_uri: str
+ :keyword registry_private_endpoint_connections: Private endpoint connections info used for
+ pending connections in private link portal.
+ :paramtype registry_private_endpoint_connections:
+ list[~azure.mgmt.machinelearningservices.models.RegistryPrivateEndpointConnection]
+ :keyword public_network_access: Is the Registry accessible from the internet?
+ Possible values: "Enabled" or "Disabled".
+ :paramtype public_network_access: str
+ :keyword region_details: Details of each region the registry is in.
+ :paramtype region_details:
+ list[~azure.mgmt.machinelearningservices.models.RegistryRegionArmDetails]
"""
- super().__init__(**kwargs)
+ super().__init__(tags=tags, location=location, **kwargs)
self.identity = identity
- self.location = location
- self.tags = tags
+ self.kind = kind
self.sku = sku
- self.group_id = None
- self.required_members = None
- self.required_zone_names = required_zone_names
-
-
-class PrivateLinkResourceListResult(_serialization.Model):
- """A list of private link resources.
-
- :ivar value: Array of private link resources.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.PrivateLinkResource]
- """
-
- _attribute_map = {
- "value": {"key": "value", "type": "[PrivateLinkResource]"},
- }
-
- def __init__(self, *, value: Optional[List["_models.PrivateLinkResource"]] = None, **kwargs: Any) -> None:
- """
- :keyword value: Array of private link resources.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.PrivateLinkResource]
- """
- super().__init__(**kwargs)
- self.value = value
-
-
-class PrivateLinkServiceConnectionState(_serialization.Model):
- """A collection of information about the state of the connection between service consumer and
- provider.
-
- :ivar status: Indicates whether the connection has been Approved/Rejected/Removed by the owner
- of the service. Known values are: "Pending", "Approved", "Rejected", "Disconnected", and
- "Timeout".
- :vartype status: str or
- ~azure.mgmt.machinelearningservices.models.PrivateEndpointServiceConnectionStatus
- :ivar description: The reason for approval/rejection of the connection.
- :vartype description: str
- :ivar actions_required: A message indicating if changes on the service provider require any
- updates on the consumer.
- :vartype actions_required: str
- """
-
- _attribute_map = {
- "status": {"key": "status", "type": "str"},
- "description": {"key": "description", "type": "str"},
- "actions_required": {"key": "actionsRequired", "type": "str"},
- }
-
- def __init__(
- self,
- *,
- status: Optional[Union[str, "_models.PrivateEndpointServiceConnectionStatus"]] = None,
- description: Optional[str] = None,
- actions_required: Optional[str] = None,
- **kwargs: Any
- ) -> None:
- """
- :keyword status: Indicates whether the connection has been Approved/Rejected/Removed by the
- owner of the service. Known values are: "Pending", "Approved", "Rejected", "Disconnected", and
- "Timeout".
- :paramtype status: str or
- ~azure.mgmt.machinelearningservices.models.PrivateEndpointServiceConnectionStatus
- :keyword description: The reason for approval/rejection of the connection.
- :paramtype description: str
- :keyword actions_required: A message indicating if changes on the service provider require any
- updates on the consumer.
- :paramtype actions_required: str
- """
- super().__init__(**kwargs)
- self.status = status
- self.description = description
- self.actions_required = actions_required
-
-
-class ProbeSettings(_serialization.Model):
- """Deployment container liveness/readiness probe configuration.
-
- :ivar failure_threshold: The number of failures to allow before returning an unhealthy status.
- :vartype failure_threshold: int
- :ivar initial_delay: The delay before the first probe in ISO 8601 format.
- :vartype initial_delay: ~datetime.timedelta
- :ivar period: The length of time between probes in ISO 8601 format.
- :vartype period: ~datetime.timedelta
- :ivar success_threshold: The number of successful probes before returning a healthy status.
- :vartype success_threshold: int
- :ivar timeout: The probe timeout in ISO 8601 format.
- :vartype timeout: ~datetime.timedelta
- """
-
- _attribute_map = {
- "failure_threshold": {"key": "failureThreshold", "type": "int"},
- "initial_delay": {"key": "initialDelay", "type": "duration"},
- "period": {"key": "period", "type": "duration"},
- "success_threshold": {"key": "successThreshold", "type": "int"},
- "timeout": {"key": "timeout", "type": "duration"},
- }
-
- def __init__(
- self,
- *,
- failure_threshold: int = 30,
- initial_delay: Optional[datetime.timedelta] = None,
- period: datetime.timedelta = "PT10S",
- success_threshold: int = 1,
- timeout: datetime.timedelta = "PT2S",
- **kwargs: Any
- ) -> None:
- """
- :keyword failure_threshold: The number of failures to allow before returning an unhealthy
- status.
- :paramtype failure_threshold: int
- :keyword initial_delay: The delay before the first probe in ISO 8601 format.
- :paramtype initial_delay: ~datetime.timedelta
- :keyword period: The length of time between probes in ISO 8601 format.
- :paramtype period: ~datetime.timedelta
- :keyword success_threshold: The number of successful probes before returning a healthy status.
- :paramtype success_threshold: int
- :keyword timeout: The probe timeout in ISO 8601 format.
- :paramtype timeout: ~datetime.timedelta
- """
- super().__init__(**kwargs)
- self.failure_threshold = failure_threshold
- self.initial_delay = initial_delay
- self.period = period
- self.success_threshold = success_threshold
- self.timeout = timeout
+ self.discovery_url = discovery_url
+ self.intellectual_property_publisher = intellectual_property_publisher
+ self.managed_resource_group = managed_resource_group
+ self.ml_flow_registry_uri = ml_flow_registry_uri
+ self.registry_private_endpoint_connections = registry_private_endpoint_connections
+ self.public_network_access = public_network_access
+ self.region_details = region_details
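A hedged sketch of the new Registry tracked resource; the region, identity, and tags are illustrative, and the create call in the comment is an assumption about the registries operations group:

    from azure.mgmt.machinelearningservices import models

    registry = models.Registry(
        location="eastus",
        identity=models.ManagedServiceIdentity(type="SystemAssigned"),
        public_network_access="Enabled",
        region_details=[models.RegistryRegionArmDetails(location="eastus")],
        tags={"team": "ml-platform"},
    )
    # e.g. client.registries.begin_create_or_update("<resource-group>", "<registry-name>", registry)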
-class PyTorch(DistributionConfiguration):
- """PyTorch distribution configuration.
+class RegistryListCredentialsResult(_serialization.Model):
+ """RegistryListCredentialsResult.
- All required parameters must be populated in order to send to Azure.
+ Variables are only populated by the server, and will be ignored when sending a request.
- :ivar distribution_type: [Required] Specifies the type of distribution framework. Required.
- Known values are: "PyTorch", "TensorFlow", and "Mpi".
- :vartype distribution_type: str or ~azure.mgmt.machinelearningservices.models.DistributionType
- :ivar process_count_per_instance: Number of processes per node.
- :vartype process_count_per_instance: int
+ :ivar location: The location of the workspace ACR.
+ :vartype location: str
+ :ivar passwords:
+ :vartype passwords: list[~azure.mgmt.machinelearningservices.models.Password]
+ :ivar username: The username of the workspace ACR.
+ :vartype username: str
"""
_validation = {
- "distribution_type": {"required": True},
+ "location": {"readonly": True},
+ "username": {"readonly": True},
}
_attribute_map = {
- "distribution_type": {"key": "distributionType", "type": "str"},
- "process_count_per_instance": {"key": "processCountPerInstance", "type": "int"},
+ "location": {"key": "location", "type": "str"},
+ "passwords": {"key": "passwords", "type": "[Password]"},
+ "username": {"key": "username", "type": "str"},
}
- def __init__(self, *, process_count_per_instance: Optional[int] = None, **kwargs: Any) -> None:
+ def __init__(self, *, passwords: Optional[List["_models.Password"]] = None, **kwargs: Any) -> None:
"""
- :keyword process_count_per_instance: Number of processes per node.
- :paramtype process_count_per_instance: int
+ :keyword passwords:
+ :paramtype passwords: list[~azure.mgmt.machinelearningservices.models.Password]
"""
super().__init__(**kwargs)
- self.distribution_type: str = "PyTorch"
- self.process_count_per_instance = process_count_per_instance
+ self.location = None
+ self.passwords = passwords
+ self.username = None
-class QuotaBaseProperties(_serialization.Model):
- """The properties for Quota update or retrieval.
+class RegistryPartialManagedServiceIdentity(ManagedServiceIdentity):
+ """Managed service identity (system assigned and/or user assigned identities).
- :ivar id: Specifies the resource ID.
- :vartype id: str
- :ivar type: Specifies the resource type.
- :vartype type: str
- :ivar limit: The maximum permitted quota of the resource.
- :vartype limit: int
- :ivar unit: An enum describing the unit of quota measurement. "Count"
- :vartype unit: str or ~azure.mgmt.machinelearningservices.models.QuotaUnit
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar principal_id: The service principal ID of the system assigned identity. This property
+ will only be provided for a system assigned identity.
+ :vartype principal_id: str
+ :ivar tenant_id: The tenant ID of the system assigned identity. This property will only be
+ provided for a system assigned identity.
+ :vartype tenant_id: str
+ :ivar type: Type of managed service identity (where both SystemAssigned and UserAssigned types
+ are allowed). Required. Known values are: "None", "SystemAssigned", "UserAssigned", and
+ "SystemAssigned,UserAssigned".
+ :vartype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
+ :ivar user_assigned_identities: The set of user assigned identities associated with the
+ resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
+ '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}.
+ The dictionary values can be empty objects ({}) in requests.
+ :vartype user_assigned_identities: dict[str,
+ ~azure.mgmt.machinelearningservices.models.UserAssignedIdentity]
"""
+ _validation = {
+ "principal_id": {"readonly": True},
+ "tenant_id": {"readonly": True},
+ "type": {"required": True},
+ }
+
_attribute_map = {
- "id": {"key": "id", "type": "str"},
+ "principal_id": {"key": "principalId", "type": "str"},
+ "tenant_id": {"key": "tenantId", "type": "str"},
"type": {"key": "type", "type": "str"},
- "limit": {"key": "limit", "type": "int"},
- "unit": {"key": "unit", "type": "str"},
+ "user_assigned_identities": {"key": "userAssignedIdentities", "type": "{UserAssignedIdentity}"},
}
def __init__(
self,
*,
- id: Optional[str] = None, # pylint: disable=redefined-builtin
- type: Optional[str] = None,
- limit: Optional[int] = None,
- unit: Optional[Union[str, "_models.QuotaUnit"]] = None,
+ type: Union[str, "_models.ManagedServiceIdentityType"],
+ user_assigned_identities: Optional[Dict[str, "_models.UserAssignedIdentity"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword id: Specifies the resource ID.
- :paramtype id: str
- :keyword type: Specifies the resource type.
- :paramtype type: str
- :keyword limit: The maximum permitted quota of the resource.
- :paramtype limit: int
- :keyword unit: An enum describing the unit of quota measurement. "Count"
- :paramtype unit: str or ~azure.mgmt.machinelearningservices.models.QuotaUnit
+ :keyword type: Type of managed service identity (where both SystemAssigned and UserAssigned
+ types are allowed). Required. Known values are: "None", "SystemAssigned", "UserAssigned", and
+ "SystemAssigned,UserAssigned".
+ :paramtype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
+ :keyword user_assigned_identities: The set of user assigned identities associated with the
+ resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
+ '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}.
+ The dictionary values can be empty objects ({}) in requests.
+ :paramtype user_assigned_identities: dict[str,
+ ~azure.mgmt.machinelearningservices.models.UserAssignedIdentity]
"""
- super().__init__(**kwargs)
- self.id = id
- self.type = type
- self.limit = limit
- self.unit = unit
+ super().__init__(type=type, user_assigned_identities=user_assigned_identities, **kwargs)
-class QuotaUpdateParameters(_serialization.Model):
- """Quota update parameters.
+class RegistryPrivateEndpointConnection(_serialization.Model):
+ """Private endpoint connection definition.
- :ivar value: The list for update quota.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.QuotaBaseProperties]
- :ivar location: Region of workspace quota to be updated.
+ :ivar id: This is the private endpoint connection name created on SRP
+ Full resource id:
+ /subscriptions/{subId}/resourceGroups/{rgName}/providers/Microsoft.MachineLearningServices/{resourceType}/{resourceName}/registryPrivateEndpointConnections/{peConnectionName}.
+ :vartype id: str
+ :ivar location: Same as workspace location.
:vartype location: str
+ :ivar group_ids: The group ids.
+ :vartype group_ids: list[str]
+ :ivar private_endpoint: The PE network resource that is linked to this PE connection.
+ :vartype private_endpoint: ~azure.mgmt.machinelearningservices.models.PrivateEndpointResource
+ :ivar registry_private_link_service_connection_state: The connection state.
+ :vartype registry_private_link_service_connection_state:
+ ~azure.mgmt.machinelearningservices.models.RegistryPrivateLinkServiceConnectionState
+ :ivar provisioning_state: One of null, "Succeeded", "Provisioning", or "Failed". The value is
+ null while the connection is not approved.
+ :vartype provisioning_state: str
"""
_attribute_map = {
- "value": {"key": "value", "type": "[QuotaBaseProperties]"},
+ "id": {"key": "id", "type": "str"},
"location": {"key": "location", "type": "str"},
+ "group_ids": {"key": "properties.groupIds", "type": "[str]"},
+ "private_endpoint": {"key": "properties.privateEndpoint", "type": "PrivateEndpointResource"},
+ "registry_private_link_service_connection_state": {
+ "key": "properties.registryPrivateLinkServiceConnectionState",
+ "type": "RegistryPrivateLinkServiceConnectionState",
+ },
+ "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
}
def __init__(
self,
*,
- value: Optional[List["_models.QuotaBaseProperties"]] = None,
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
location: Optional[str] = None,
+ group_ids: Optional[List[str]] = None,
+ private_endpoint: Optional["_models.PrivateEndpointResource"] = None,
+ registry_private_link_service_connection_state: Optional[
+ "_models.RegistryPrivateLinkServiceConnectionState"
+ ] = None,
+ provisioning_state: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword value: The list for update quota.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.QuotaBaseProperties]
- :keyword location: Region of workspace quota to be updated.
+ :keyword id: This is the private endpoint connection name created on SRP
+ Full resource id:
+ /subscriptions/{subId}/resourceGroups/{rgName}/providers/Microsoft.MachineLearningServices/{resourceType}/{resourceName}/registryPrivateEndpointConnections/{peConnectionName}.
+ :paramtype id: str
+ :keyword location: Same as workspace location.
:paramtype location: str
+ :keyword group_ids: The group ids.
+ :paramtype group_ids: list[str]
+ :keyword private_endpoint: The PE network resource that is linked to this PE connection.
+ :paramtype private_endpoint: ~azure.mgmt.machinelearningservices.models.PrivateEndpointResource
+ :keyword registry_private_link_service_connection_state: The connection state.
+ :paramtype registry_private_link_service_connection_state:
+ ~azure.mgmt.machinelearningservices.models.RegistryPrivateLinkServiceConnectionState
+ :keyword provisioning_state: One of null, "Succeeded", "Provisioning", or "Failed". The value
+ is null while the connection is not approved.
+ :paramtype provisioning_state: str
"""
super().__init__(**kwargs)
- self.value = value
+ self.id = id
self.location = location
+ self.group_ids = group_ids
+ self.private_endpoint = private_endpoint
+ self.registry_private_link_service_connection_state = registry_private_link_service_connection_state
+ self.provisioning_state = provisioning_state
-class RandomSamplingAlgorithm(SamplingAlgorithm):
- """Defines a Sampling Algorithm that generates values randomly.
-
- All required parameters must be populated in order to send to Azure.
+class RegistryPrivateLinkServiceConnectionState(_serialization.Model):
+ """The connection state.
- :ivar sampling_algorithm_type: [Required] The algorithm used for generating hyperparameter
- values, along with configuration properties. Required. Known values are: "Grid", "Random", and
- "Bayesian".
- :vartype sampling_algorithm_type: str or
- ~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
- :ivar rule: The specific type of random algorithm. Known values are: "Random" and "Sobol".
- :vartype rule: str or ~azure.mgmt.machinelearningservices.models.RandomSamplingAlgorithmRule
- :ivar seed: An optional integer to use as the seed for random number generation.
- :vartype seed: int
+ :ivar actions_required: Some RPs chose "None"; other RPs use this field for region expansion.
+ :vartype actions_required: str
+ :ivar description: User-defined message that, per NRP documentation, may be used for
+ approval-related messages.
+ :vartype description: str
+ :ivar status: Connection status of the service consumer with the service provider. Known values
+ are: "Approved", "Pending", "Rejected", "Disconnected", and "Timeout".
+ :vartype status: str or
+ ~azure.mgmt.machinelearningservices.models.EndpointServiceConnectionStatus
"""
- _validation = {
- "sampling_algorithm_type": {"required": True},
- }
-
_attribute_map = {
- "sampling_algorithm_type": {"key": "samplingAlgorithmType", "type": "str"},
- "rule": {"key": "rule", "type": "str"},
- "seed": {"key": "seed", "type": "int"},
+ "actions_required": {"key": "actionsRequired", "type": "str"},
+ "description": {"key": "description", "type": "str"},
+ "status": {"key": "status", "type": "str"},
}
def __init__(
self,
*,
- rule: Optional[Union[str, "_models.RandomSamplingAlgorithmRule"]] = None,
- seed: Optional[int] = None,
+ actions_required: Optional[str] = None,
+ description: Optional[str] = None,
+ status: Optional[Union[str, "_models.EndpointServiceConnectionStatus"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword rule: The specific type of random algorithm. Known values are: "Random" and "Sobol".
- :paramtype rule: str or ~azure.mgmt.machinelearningservices.models.RandomSamplingAlgorithmRule
- :keyword seed: An optional integer to use as the seed for random number generation.
- :paramtype seed: int
+ :keyword actions_required: Some RPs chose "None"; other RPs use this field for region expansion.
+ :paramtype actions_required: str
+ :keyword description: User-defined message that, per NRP documentation, may be used for
+ approval-related messages.
+ :paramtype description: str
+ :keyword status: Connection status of the service consumer with the service provider. Known
+ values are: "Approved", "Pending", "Rejected", "Disconnected", and "Timeout".
+ :paramtype status: str or
+ ~azure.mgmt.machinelearningservices.models.EndpointServiceConnectionStatus
"""
super().__init__(**kwargs)
- self.sampling_algorithm_type: str = "Random"
- self.rule = rule
- self.seed = seed
+ self.actions_required = actions_required
+ self.description = description
+ self.status = status
-class Recurrence(_serialization.Model):
- """The workflow trigger recurrence for ComputeStartStop schedule type.
+class RegistryRegionArmDetails(_serialization.Model):
+ """Details for each region the registry is in.
- :ivar frequency: [Required] The frequency to trigger schedule. Known values are: "Minute",
- "Hour", "Day", "Week", and "Month".
- :vartype frequency: str or ~azure.mgmt.machinelearningservices.models.RecurrenceFrequency
- :ivar interval: [Required] Specifies schedule interval in conjunction with frequency.
- :vartype interval: int
- :ivar start_time: The start time in yyyy-MM-ddTHH:mm:ss format.
- :vartype start_time: str
- :ivar time_zone: Specifies time zone in which the schedule runs.
- TimeZone should follow Windows time zone format. Refer:
- https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
- :vartype time_zone: str
- :ivar schedule: [Required] The recurrence schedule.
- :vartype schedule: ~azure.mgmt.machinelearningservices.models.RecurrenceSchedule
+ :ivar acr_details: List of ACR accounts.
+ :vartype acr_details: list[~azure.mgmt.machinelearningservices.models.AcrDetails]
+ :ivar location: The location where the registry exists.
+ :vartype location: str
+ :ivar storage_account_details: List of storage accounts.
+ :vartype storage_account_details:
+ list[~azure.mgmt.machinelearningservices.models.StorageAccountDetails]
"""
_attribute_map = {
- "frequency": {"key": "frequency", "type": "str"},
- "interval": {"key": "interval", "type": "int"},
- "start_time": {"key": "startTime", "type": "str"},
- "time_zone": {"key": "timeZone", "type": "str"},
- "schedule": {"key": "schedule", "type": "RecurrenceSchedule"},
+ "acr_details": {"key": "acrDetails", "type": "[AcrDetails]"},
+ "location": {"key": "location", "type": "str"},
+ "storage_account_details": {"key": "storageAccountDetails", "type": "[StorageAccountDetails]"},
}
-
- def __init__(
- self,
- *,
- frequency: Optional[Union[str, "_models.RecurrenceFrequency"]] = None,
- interval: Optional[int] = None,
- start_time: Optional[str] = None,
- time_zone: str = "UTC",
- schedule: Optional["_models.RecurrenceSchedule"] = None,
- **kwargs: Any
- ) -> None:
- """
- :keyword frequency: [Required] The frequency to trigger schedule. Known values are: "Minute",
- "Hour", "Day", "Week", and "Month".
- :paramtype frequency: str or ~azure.mgmt.machinelearningservices.models.RecurrenceFrequency
- :keyword interval: [Required] Specifies schedule interval in conjunction with frequency.
- :paramtype interval: int
- :keyword start_time: The start time in yyyy-MM-ddTHH:mm:ss format.
- :paramtype start_time: str
- :keyword time_zone: Specifies time zone in which the schedule runs.
- TimeZone should follow Windows time zone format. Refer:
- https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
- :paramtype time_zone: str
- :keyword schedule: [Required] The recurrence schedule.
- :paramtype schedule: ~azure.mgmt.machinelearningservices.models.RecurrenceSchedule
+
+ def __init__(
+ self,
+ *,
+ acr_details: Optional[List["_models.AcrDetails"]] = None,
+ location: Optional[str] = None,
+ storage_account_details: Optional[List["_models.StorageAccountDetails"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword acr_details: List of ACR accounts.
+ :paramtype acr_details: list[~azure.mgmt.machinelearningservices.models.AcrDetails]
+ :keyword location: The location where the registry exists.
+ :paramtype location: str
+ :keyword storage_account_details: List of storage accounts.
+ :paramtype storage_account_details:
+ list[~azure.mgmt.machinelearningservices.models.StorageAccountDetails]
"""
super().__init__(**kwargs)
- self.frequency = frequency
- self.interval = interval
- self.start_time = start_time
- self.time_zone = time_zone
- self.schedule = schedule
-
+ self.acr_details = acr_details
+ self.location = location
+ self.storage_account_details = storage_account_details
-class RecurrenceSchedule(_serialization.Model):
- """RecurrenceSchedule.
- All required parameters must be populated in order to send to Azure.
+class RegistryTrackedResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of Registry entities.
- :ivar hours: [Required] List of hours for the schedule. Required.
- :vartype hours: list[int]
- :ivar minutes: [Required] List of minutes for the schedule. Required.
- :vartype minutes: list[int]
- :ivar month_days: List of month days for the schedule.
- :vartype month_days: list[int]
- :ivar week_days: List of days for the schedule.
- :vartype week_days: list[str or ~azure.mgmt.machinelearningservices.models.WeekDay]
+ :ivar next_link: The link to the next page of Registry objects. If null, there are no
+ additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type Registry.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.Registry]
"""
- _validation = {
- "hours": {"required": True},
- "minutes": {"required": True},
- }
-
_attribute_map = {
- "hours": {"key": "hours", "type": "[int]"},
- "minutes": {"key": "minutes", "type": "[int]"},
- "month_days": {"key": "monthDays", "type": "[int]"},
- "week_days": {"key": "weekDays", "type": "[str]"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[Registry]"},
}
def __init__(
- self,
- *,
- hours: List[int],
- minutes: List[int],
- month_days: Optional[List[int]] = None,
- week_days: Optional[List[Union[str, "_models.WeekDay"]]] = None,
- **kwargs: Any
+ self, *, next_link: Optional[str] = None, value: Optional[List["_models.Registry"]] = None, **kwargs: Any
) -> None:
"""
- :keyword hours: [Required] List of hours for the schedule. Required.
- :paramtype hours: list[int]
- :keyword minutes: [Required] List of minutes for the schedule. Required.
- :paramtype minutes: list[int]
- :keyword month_days: List of month days for the schedule.
- :paramtype month_days: list[int]
- :keyword week_days: List of days for the schedule.
- :paramtype week_days: list[str or ~azure.mgmt.machinelearningservices.models.WeekDay]
+ :keyword next_link: The link to the next page of Registry objects. If null, there are no
+ additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type Registry.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.Registry]
"""
super().__init__(**kwargs)
- self.hours = hours
- self.minutes = minutes
- self.month_days = month_days
- self.week_days = week_days
+ self.next_link = next_link
+ self.value = value
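Callers normally never build this paginated wrapper by hand; the generated list operations page through it and yield Registry objects. An assumed usage sketch, reusing the client from the earlier sketch:

    # Assumed operations-group call; it pages through RegistryTrackedResourceArmPaginatedResult.
    for registry in client.registries.list("<resource-group>"):
        print(registry.name, registry.location)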
-class RecurrenceTrigger(TriggerBase):
- """RecurrenceTrigger.
+class Regression(TableVertical, AutoMLVertical): # pylint: disable=too-many-instance-attributes
+ """Regression task in AutoML Table vertical.
All required parameters must be populated in order to send to Azure.
- :ivar end_time: Specifies end time of schedule in ISO 8601, but without a UTC offset. Refer
- https://en.wikipedia.org/wiki/ISO_8601.
- Recommented format would be "2022-06-01T00:00:01"
- If not present, the schedule will run indefinitely.
- :vartype end_time: str
- :ivar start_time: Specifies start time of schedule in ISO 8601 format, but without a UTC
- offset.
- :vartype start_time: str
- :ivar time_zone: Specifies time zone in which the schedule runs.
- TimeZone should follow Windows time zone format. Refer:
- https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
- :vartype time_zone: str
- :ivar trigger_type: [Required]. Required. Known values are: "Recurrence" and "Cron".
- :vartype trigger_type: str or ~azure.mgmt.machinelearningservices.models.TriggerType
- :ivar frequency: [Required] The frequency to trigger schedule. Required. Known values are:
- "Minute", "Hour", "Day", "Week", and "Month".
- :vartype frequency: str or ~azure.mgmt.machinelearningservices.models.RecurrenceFrequency
- :ivar interval: [Required] Specifies schedule interval in conjunction with frequency. Required.
- :vartype interval: int
- :ivar schedule: The recurrence schedule.
- :vartype schedule: ~azure.mgmt.machinelearningservices.models.RecurrenceSchedule
+ :ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
+ "Warning", "Error", and "Critical".
+ :vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
+ :ivar target_column_name: Target column name: this is the column containing the prediction
+ values. Also known as the label column name in the context of classification tasks.
+ :vartype target_column_name: str
+ :ivar task_type: [Required] Task type for AutoMLJob. Required. Known values are:
+ "Classification", "Regression", "Forecasting", "ImageClassification",
+ "ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
+ "TextClassification", "TextClassificationMultilabel", and "TextNER".
+ :vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
+ :ivar training_data: [Required] Training data input. Required.
+ :vartype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar cv_split_column_names: Columns to use for CVSplit data.
+ :vartype cv_split_column_names: list[str]
+ :ivar featurization_settings: Featurization inputs needed for AutoML job.
+ :vartype featurization_settings:
+ ~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
+ :ivar fixed_parameters: Model/training parameters that will remain constant throughout
+ training.
+ :vartype fixed_parameters: ~azure.mgmt.machinelearningservices.models.TableFixedParameters
+ :ivar limit_settings: Execution constraints for AutoMLJob.
+ :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
+ :ivar n_cross_validations: Number of cross-validation folds to be applied to the training
+ dataset when a validation dataset is not provided.
+ :vartype n_cross_validations: ~azure.mgmt.machinelearningservices.models.NCrossValidations
+ :ivar search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :vartype search_space: list[~azure.mgmt.machinelearningservices.models.TableParameterSubspace]
+ :ivar sweep_settings: Settings for model sweeping and hyperparameter tuning.
+ :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.TableSweepSettings
+ :ivar test_data: Test data input.
+ :vartype test_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar test_data_size: The fraction of the test dataset that needs to be set aside for
+ validation purposes.
+ Values between (0.0, 1.0).
+ Applied when the validation dataset is not provided.
+ :vartype test_data_size: float
+ :ivar validation_data: Validation data inputs.
+ :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar validation_data_size: The fraction of the training dataset that needs to be set aside
+ for validation purposes.
+ Values between (0.0, 1.0).
+ Applied when the validation dataset is not provided.
+ :vartype validation_data_size: float
+ :ivar weight_column_name: The name of the sample weight column. Automated ML supports a
+ weighted column as an input, causing rows in the data to be weighted up or down.
+ :vartype weight_column_name: str
+ :ivar primary_metric: Primary metric for regression task. Known values are:
+ "SpearmanCorrelation", "NormalizedRootMeanSquaredError", "R2Score", and
+ "NormalizedMeanAbsoluteError".
+ :vartype primary_metric: str or
+ ~azure.mgmt.machinelearningservices.models.RegressionPrimaryMetrics
+ :ivar training_settings: Inputs for training phase for an AutoML Job.
+ :vartype training_settings:
+ ~azure.mgmt.machinelearningservices.models.RegressionTrainingSettings
"""
_validation = {
- "trigger_type": {"required": True},
- "frequency": {"required": True},
- "interval": {"required": True},
+ "task_type": {"required": True},
+ "training_data": {"required": True},
}
_attribute_map = {
- "end_time": {"key": "endTime", "type": "str"},
- "start_time": {"key": "startTime", "type": "str"},
- "time_zone": {"key": "timeZone", "type": "str"},
- "trigger_type": {"key": "triggerType", "type": "str"},
- "frequency": {"key": "frequency", "type": "str"},
- "interval": {"key": "interval", "type": "int"},
- "schedule": {"key": "schedule", "type": "RecurrenceSchedule"},
+ "log_verbosity": {"key": "logVerbosity", "type": "str"},
+ "target_column_name": {"key": "targetColumnName", "type": "str"},
+ "task_type": {"key": "taskType", "type": "str"},
+ "training_data": {"key": "trainingData", "type": "MLTableJobInput"},
+ "cv_split_column_names": {"key": "cvSplitColumnNames", "type": "[str]"},
+ "featurization_settings": {"key": "featurizationSettings", "type": "TableVerticalFeaturizationSettings"},
+ "fixed_parameters": {"key": "fixedParameters", "type": "TableFixedParameters"},
+ "limit_settings": {"key": "limitSettings", "type": "TableVerticalLimitSettings"},
+ "n_cross_validations": {"key": "nCrossValidations", "type": "NCrossValidations"},
+ "search_space": {"key": "searchSpace", "type": "[TableParameterSubspace]"},
+ "sweep_settings": {"key": "sweepSettings", "type": "TableSweepSettings"},
+ "test_data": {"key": "testData", "type": "MLTableJobInput"},
+ "test_data_size": {"key": "testDataSize", "type": "float"},
+ "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
+ "validation_data_size": {"key": "validationDataSize", "type": "float"},
+ "weight_column_name": {"key": "weightColumnName", "type": "str"},
+ "primary_metric": {"key": "primaryMetric", "type": "str"},
+ "training_settings": {"key": "trainingSettings", "type": "RegressionTrainingSettings"},
}
def __init__(
self,
*,
- frequency: Union[str, "_models.RecurrenceFrequency"],
- interval: int,
- end_time: Optional[str] = None,
- start_time: Optional[str] = None,
- time_zone: str = "UTC",
- schedule: Optional["_models.RecurrenceSchedule"] = None,
+ training_data: "_models.MLTableJobInput",
+ log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
+ target_column_name: Optional[str] = None,
+ cv_split_column_names: Optional[List[str]] = None,
+ featurization_settings: Optional["_models.TableVerticalFeaturizationSettings"] = None,
+ fixed_parameters: Optional["_models.TableFixedParameters"] = None,
+ limit_settings: Optional["_models.TableVerticalLimitSettings"] = None,
+ n_cross_validations: Optional["_models.NCrossValidations"] = None,
+ search_space: Optional[List["_models.TableParameterSubspace"]] = None,
+ sweep_settings: Optional["_models.TableSweepSettings"] = None,
+ test_data: Optional["_models.MLTableJobInput"] = None,
+ test_data_size: Optional[float] = None,
+ validation_data: Optional["_models.MLTableJobInput"] = None,
+ validation_data_size: Optional[float] = None,
+ weight_column_name: Optional[str] = None,
+ primary_metric: Optional[Union[str, "_models.RegressionPrimaryMetrics"]] = None,
+ training_settings: Optional["_models.RegressionTrainingSettings"] = None,
**kwargs: Any
) -> None:
"""
- :keyword end_time: Specifies end time of schedule in ISO 8601, but without a UTC offset. Refer
- https://en.wikipedia.org/wiki/ISO_8601.
- Recommented format would be "2022-06-01T00:00:01"
- If not present, the schedule will run indefinitely.
- :paramtype end_time: str
- :keyword start_time: Specifies start time of schedule in ISO 8601 format, but without a UTC
- offset.
- :paramtype start_time: str
- :keyword time_zone: Specifies time zone in which the schedule runs.
- TimeZone should follow Windows time zone format. Refer:
- https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
- :paramtype time_zone: str
- :keyword frequency: [Required] The frequency to trigger schedule. Required. Known values are:
- "Minute", "Hour", "Day", "Week", and "Month".
- :paramtype frequency: str or ~azure.mgmt.machinelearningservices.models.RecurrenceFrequency
- :keyword interval: [Required] Specifies schedule interval in conjunction with frequency.
- Required.
- :paramtype interval: int
- :keyword schedule: The recurrence schedule.
- :paramtype schedule: ~azure.mgmt.machinelearningservices.models.RecurrenceSchedule
+ :keyword log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
+ "Warning", "Error", and "Critical".
+ :paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
+ :keyword target_column_name: Target column name: this is the column containing the prediction
+ values. Also known as the label column name in the context of classification tasks.
+ :paramtype target_column_name: str
+ :keyword training_data: [Required] Training data input. Required.
+ :paramtype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword cv_split_column_names: Columns to use for CVSplit data.
+ :paramtype cv_split_column_names: list[str]
+ :keyword featurization_settings: Featurization inputs needed for AutoML job.
+ :paramtype featurization_settings:
+ ~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
+ :keyword fixed_parameters: Model/training parameters that will remain constant throughout
+ training.
+ :paramtype fixed_parameters: ~azure.mgmt.machinelearningservices.models.TableFixedParameters
+ :keyword limit_settings: Execution constraints for AutoMLJob.
+ :paramtype limit_settings:
+ ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
+ :keyword n_cross_validations: Number of cross-validation folds to be applied to the training
+ dataset when a validation dataset is not provided.
+ :paramtype n_cross_validations: ~azure.mgmt.machinelearningservices.models.NCrossValidations
+ :keyword search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :paramtype search_space:
+ list[~azure.mgmt.machinelearningservices.models.TableParameterSubspace]
+ :keyword sweep_settings: Settings for model sweeping and hyperparameter tuning.
+ :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.TableSweepSettings
+ :keyword test_data: Test data input.
+ :paramtype test_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword test_data_size: The fraction of the test dataset that needs to be set aside for
+ validation purposes.
+ Values between (0.0, 1.0).
+ Applied when the validation dataset is not provided.
+ :paramtype test_data_size: float
+ :keyword validation_data: Validation data inputs.
+ :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword validation_data_size: The fraction of the training dataset that needs to be set
+ aside for validation purposes.
+ Values between (0.0, 1.0).
+ Applied when the validation dataset is not provided.
+ :paramtype validation_data_size: float
+ :keyword weight_column_name: The name of the sample weight column. Automated ML supports a
+ weighted column as an input, causing rows in the data to be weighted up or down.
+ :paramtype weight_column_name: str
+ :keyword primary_metric: Primary metric for regression task. Known values are:
+ "SpearmanCorrelation", "NormalizedRootMeanSquaredError", "R2Score", and
+ "NormalizedMeanAbsoluteError".
+ :paramtype primary_metric: str or
+ ~azure.mgmt.machinelearningservices.models.RegressionPrimaryMetrics
+ :keyword training_settings: Inputs for training phase for an AutoML Job.
+ :paramtype training_settings:
+ ~azure.mgmt.machinelearningservices.models.RegressionTrainingSettings
"""
- super().__init__(end_time=end_time, start_time=start_time, time_zone=time_zone, **kwargs)
- self.trigger_type: str = "Recurrence"
- self.frequency = frequency
- self.interval = interval
- self.schedule = schedule
+ super().__init__(
+ cv_split_column_names=cv_split_column_names,
+ featurization_settings=featurization_settings,
+ fixed_parameters=fixed_parameters,
+ limit_settings=limit_settings,
+ n_cross_validations=n_cross_validations,
+ search_space=search_space,
+ sweep_settings=sweep_settings,
+ test_data=test_data,
+ test_data_size=test_data_size,
+ validation_data=validation_data,
+ validation_data_size=validation_data_size,
+ weight_column_name=weight_column_name,
+ log_verbosity=log_verbosity,
+ target_column_name=target_column_name,
+ training_data=training_data,
+ **kwargs
+ )
+ self.log_verbosity = log_verbosity
+ self.target_column_name = target_column_name
+ self.task_type: str = "Regression"
+ self.training_data = training_data
+ self.primary_metric = primary_metric
+ self.training_settings = training_settings
+ self.cv_split_column_names = cv_split_column_names
+ self.featurization_settings = featurization_settings
+ self.fixed_parameters = fixed_parameters
+ self.limit_settings = limit_settings
+ self.n_cross_validations = n_cross_validations
+ self.search_space = search_space
+ self.sweep_settings = sweep_settings
+ self.test_data = test_data
+ self.test_data_size = test_data_size
+ self.validation_data = validation_data
+ self.validation_data_size = validation_data_size
+ self.weight_column_name = weight_column_name
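
As a hedged sketch of how the keyword-only constructor above is meant to be used; the MLTable URI, the column name, and the assumption that MLTableJobInput accepts a `uri` keyword are illustrative, not taken from this diff.

# Sketch: a Regression AutoML task assembled from the signature above.
from azure.mgmt.machinelearningservices import models

regression_task = models.Regression(
    # Assumption: MLTableJobInput takes a `uri` keyword pointing at an MLTable asset.
    training_data=models.MLTableJobInput(
        uri="azureml://datastores/workspaceblobstore/paths/house-prices/"
    ),
    target_column_name="price",                            # illustrative label column
    primary_metric="NormalizedRootMeanSquaredError",       # one of the documented known values
    log_verbosity="Info",
    test_data_size=0.2,                                    # fraction in (0.0, 1.0)
    training_settings=models.RegressionTrainingSettings(), # defaults shown further below
)
# The discriminator is fixed by the constructor:
assert regression_task.task_type == "Regression"
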
-class RegenerateEndpointKeysRequest(_serialization.Model):
- """RegenerateEndpointKeysRequest.
+class RegressionModelPerformanceMetricThreshold(ModelPerformanceMetricThresholdBase):
+ """RegressionModelPerformanceMetricThreshold.
All required parameters must be populated in order to send to Azure.
- :ivar key_type: [Required] Specification for which type of key to generate. Primary or
- Secondary. Required. Known values are: "Primary" and "Secondary".
- :vartype key_type: str or ~azure.mgmt.machinelearningservices.models.KeyType
- :ivar key_value: The value the key is set to.
- :vartype key_value: str
+ :ivar model_type: [Required] Specifies the data type of the metric threshold. Required. Known
+ values are: "Classification" and "Regression".
+ :vartype model_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringModelType
+ :ivar threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :ivar metric: [Required] The regression model performance metric to calculate. Required. Known
+ values are: "MeanAbsoluteError", "RootMeanSquaredError", and "MeanSquaredError".
+ :vartype metric: str or
+ ~azure.mgmt.machinelearningservices.models.RegressionModelPerformanceMetric
"""
_validation = {
- "key_type": {"required": True},
+ "model_type": {"required": True},
+ "metric": {"required": True},
}
_attribute_map = {
- "key_type": {"key": "keyType", "type": "str"},
- "key_value": {"key": "keyValue", "type": "str"},
+ "model_type": {"key": "modelType", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
+ "metric": {"key": "metric", "type": "str"},
}
def __init__(
- self, *, key_type: Union[str, "_models.KeyType"], key_value: Optional[str] = None, **kwargs: Any
+ self,
+ *,
+ metric: Union[str, "_models.RegressionModelPerformanceMetric"],
+ threshold: Optional["_models.MonitoringThreshold"] = None,
+ **kwargs: Any
) -> None:
"""
- :keyword key_type: [Required] Specification for which type of key to generate. Primary or
- Secondary. Required. Known values are: "Primary" and "Secondary".
- :paramtype key_type: str or ~azure.mgmt.machinelearningservices.models.KeyType
- :keyword key_value: The value the key is set to.
- :paramtype key_value: str
+ :keyword threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :keyword metric: [Required] The regression model performance metric to calculate. Required.
+ Known values are: "MeanAbsoluteError", "RootMeanSquaredError", and "MeanSquaredError".
+ :paramtype metric: str or
+ ~azure.mgmt.machinelearningservices.models.RegressionModelPerformanceMetric
"""
- super().__init__(**kwargs)
- self.key_type = key_type
- self.key_value = key_value
-
-
-class Registry(TrackedResource): # pylint: disable=too-many-instance-attributes
- """Registry.
+ super().__init__(threshold=threshold, **kwargs)
+ self.model_type: str = "Regression"
+ self.metric = metric
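
A small hedged sketch of constructing this threshold, leaving threshold unset so the service default applies (the `models` import path is assumed).

# Sketch: rely on the service-side default threshold for the chosen metric.
from azure.mgmt.machinelearningservices import models

rmse_threshold = models.RegressionModelPerformanceMetricThreshold(metric="RootMeanSquaredError")
assert rmse_threshold.model_type == "Regression"  # discriminator set in __init__
assert rmse_threshold.threshold is None           # service picks a default per the docstring
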
- Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+class RegressionTrainingSettings(TrainingSettings):
+ """Regression Training related configuration.
- :ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
- :vartype id: str
- :ivar name: The name of the resource.
- :vartype name: str
- :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
- "Microsoft.Storage/storageAccounts".
- :vartype type: str
- :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
- information.
- :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
- :ivar tags: Resource tags.
- :vartype tags: dict[str, str]
- :ivar location: The geo-location where the resource lives. Required.
- :vartype location: str
- :ivar identity: Managed service identity (system assigned and/or user assigned identities).
- :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
- :ivar kind: Metadata used by portal/tooling/etc to render different UX experiences for
- resources of the same type.
- :vartype kind: str
- :ivar sku: Sku details required for ARM contract for Autoscaling.
- :vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
- :ivar discovery_url: Discovery URL for the Registry.
- :vartype discovery_url: str
- :ivar intellectual_property_publisher: IntellectualPropertyPublisher for the registry.
- :vartype intellectual_property_publisher: str
- :ivar managed_resource_group: ResourceId of the managed RG if the registry has system created
- resources.
- :vartype managed_resource_group: ~azure.mgmt.machinelearningservices.models.ArmResourceId
- :ivar ml_flow_registry_uri: MLFlow Registry URI for the Registry.
- :vartype ml_flow_registry_uri: str
- :ivar registry_private_endpoint_connections: Private endpoint connections info used for pending
- connections in private link portal.
- :vartype registry_private_endpoint_connections:
- list[~azure.mgmt.machinelearningservices.models.RegistryPrivateEndpointConnection]
- :ivar public_network_access: Is the Registry accessible from the internet?
- Possible values: "Enabled" or "Disabled".
- :vartype public_network_access: str
- :ivar region_details: Details of each region the registry is in.
- :vartype region_details:
- list[~azure.mgmt.machinelearningservices.models.RegistryRegionArmDetails]
+ :ivar enable_dnn_training: Enable recommendation of DNN models.
+ :vartype enable_dnn_training: bool
+ :ivar enable_model_explainability: Flag to turn on explainability on best model.
+ :vartype enable_model_explainability: bool
+ :ivar enable_onnx_compatible_models: Flag for enabling onnx compatible models.
+ :vartype enable_onnx_compatible_models: bool
+ :ivar enable_stack_ensemble: Enable stack ensemble run.
+ :vartype enable_stack_ensemble: bool
+ :ivar enable_vote_ensemble: Enable voting ensemble run.
+ :vartype enable_vote_ensemble: bool
+ :ivar ensemble_model_download_timeout: During VotingEnsemble and StackEnsemble model
+ generation, multiple fitted models from the previous child runs are downloaded.
+ Configure this parameter with a value higher than 300 seconds if more time is needed.
+ :vartype ensemble_model_download_timeout: ~datetime.timedelta
+ :ivar stack_ensemble_settings: Stack ensemble settings for stack ensemble run.
+ :vartype stack_ensemble_settings:
+ ~azure.mgmt.machinelearningservices.models.StackEnsembleSettings
+ :ivar training_mode: Training mode. Setting it to 'auto' is currently the same as setting it
+ to 'non-distributed'; in the future it may result in mixed-mode or heuristics-based mode
+ selection. Default is 'auto'.
+ If 'Distributed', only distributed featurization is used and distributed algorithms are
+ chosen.
+ If 'NonDistributed', only non-distributed algorithms are chosen. Known values are: "Auto",
+ "Distributed", and "NonDistributed".
+ :vartype training_mode: str or ~azure.mgmt.machinelearningservices.models.TrainingMode
+ :ivar allowed_training_algorithms: Allowed models for regression task.
+ :vartype allowed_training_algorithms: list[str or
+ ~azure.mgmt.machinelearningservices.models.RegressionModels]
+ :ivar blocked_training_algorithms: Blocked models for regression task.
+ :vartype blocked_training_algorithms: list[str or
+ ~azure.mgmt.machinelearningservices.models.RegressionModels]
"""
- _validation = {
- "id": {"readonly": True},
- "name": {"readonly": True},
- "type": {"readonly": True},
- "system_data": {"readonly": True},
- "location": {"required": True},
- }
-
_attribute_map = {
- "id": {"key": "id", "type": "str"},
- "name": {"key": "name", "type": "str"},
- "type": {"key": "type", "type": "str"},
- "system_data": {"key": "systemData", "type": "SystemData"},
- "tags": {"key": "tags", "type": "{str}"},
- "location": {"key": "location", "type": "str"},
- "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
- "kind": {"key": "kind", "type": "str"},
- "sku": {"key": "sku", "type": "Sku"},
- "discovery_url": {"key": "properties.discoveryUrl", "type": "str"},
- "intellectual_property_publisher": {"key": "properties.intellectualPropertyPublisher", "type": "str"},
- "managed_resource_group": {"key": "properties.managedResourceGroup", "type": "ArmResourceId"},
- "ml_flow_registry_uri": {"key": "properties.mlFlowRegistryUri", "type": "str"},
- "registry_private_endpoint_connections": {
- "key": "properties.registryPrivateEndpointConnections",
- "type": "[RegistryPrivateEndpointConnection]",
- },
- "public_network_access": {"key": "properties.publicNetworkAccess", "type": "str"},
- "region_details": {"key": "properties.regionDetails", "type": "[RegistryRegionArmDetails]"},
+ "enable_dnn_training": {"key": "enableDnnTraining", "type": "bool"},
+ "enable_model_explainability": {"key": "enableModelExplainability", "type": "bool"},
+ "enable_onnx_compatible_models": {"key": "enableOnnxCompatibleModels", "type": "bool"},
+ "enable_stack_ensemble": {"key": "enableStackEnsemble", "type": "bool"},
+ "enable_vote_ensemble": {"key": "enableVoteEnsemble", "type": "bool"},
+ "ensemble_model_download_timeout": {"key": "ensembleModelDownloadTimeout", "type": "duration"},
+ "stack_ensemble_settings": {"key": "stackEnsembleSettings", "type": "StackEnsembleSettings"},
+ "training_mode": {"key": "trainingMode", "type": "str"},
+ "allowed_training_algorithms": {"key": "allowedTrainingAlgorithms", "type": "[str]"},
+ "blocked_training_algorithms": {"key": "blockedTrainingAlgorithms", "type": "[str]"},
}
def __init__(
self,
*,
- location: str,
- tags: Optional[Dict[str, str]] = None,
- identity: Optional["_models.ManagedServiceIdentity"] = None,
- kind: Optional[str] = None,
- sku: Optional["_models.Sku"] = None,
- discovery_url: Optional[str] = None,
- intellectual_property_publisher: Optional[str] = None,
- managed_resource_group: Optional["_models.ArmResourceId"] = None,
- ml_flow_registry_uri: Optional[str] = None,
- registry_private_endpoint_connections: Optional[List["_models.RegistryPrivateEndpointConnection"]] = None,
- public_network_access: Optional[str] = None,
- region_details: Optional[List["_models.RegistryRegionArmDetails"]] = None,
+ enable_dnn_training: bool = False,
+ enable_model_explainability: bool = True,
+ enable_onnx_compatible_models: bool = False,
+ enable_stack_ensemble: bool = True,
+ enable_vote_ensemble: bool = True,
+ ensemble_model_download_timeout: datetime.timedelta = "PT5M",
+ stack_ensemble_settings: Optional["_models.StackEnsembleSettings"] = None,
+ training_mode: Optional[Union[str, "_models.TrainingMode"]] = None,
+ allowed_training_algorithms: Optional[List[Union[str, "_models.RegressionModels"]]] = None,
+ blocked_training_algorithms: Optional[List[Union[str, "_models.RegressionModels"]]] = None,
**kwargs: Any
) -> None:
"""
- :keyword tags: Resource tags.
- :paramtype tags: dict[str, str]
- :keyword location: The geo-location where the resource lives. Required.
- :paramtype location: str
- :keyword identity: Managed service identity (system assigned and/or user assigned identities).
- :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
- :keyword kind: Metadata used by portal/tooling/etc to render different UX experiences for
- resources of the same type.
- :paramtype kind: str
- :keyword sku: Sku details required for ARM contract for Autoscaling.
- :paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
- :keyword discovery_url: Discovery URL for the Registry.
- :paramtype discovery_url: str
- :keyword intellectual_property_publisher: IntellectualPropertyPublisher for the registry.
- :paramtype intellectual_property_publisher: str
- :keyword managed_resource_group: ResourceId of the managed RG if the registry has system
- created resources.
- :paramtype managed_resource_group: ~azure.mgmt.machinelearningservices.models.ArmResourceId
- :keyword ml_flow_registry_uri: MLFlow Registry URI for the Registry.
- :paramtype ml_flow_registry_uri: str
- :keyword registry_private_endpoint_connections: Private endpoint connections info used for
- pending connections in private link portal.
- :paramtype registry_private_endpoint_connections:
- list[~azure.mgmt.machinelearningservices.models.RegistryPrivateEndpointConnection]
- :keyword public_network_access: Is the Registry accessible from the internet?
- Possible values: "Enabled" or "Disabled".
- :paramtype public_network_access: str
- :keyword region_details: Details of each region the registry is in.
- :paramtype region_details:
- list[~azure.mgmt.machinelearningservices.models.RegistryRegionArmDetails]
+ :keyword enable_dnn_training: Enable recommendation of DNN models.
+ :paramtype enable_dnn_training: bool
+ :keyword enable_model_explainability: Flag to turn on explainability on best model.
+ :paramtype enable_model_explainability: bool
+ :keyword enable_onnx_compatible_models: Flag for enabling onnx compatible models.
+ :paramtype enable_onnx_compatible_models: bool
+ :keyword enable_stack_ensemble: Enable stack ensemble run.
+ :paramtype enable_stack_ensemble: bool
+ :keyword enable_vote_ensemble: Enable voting ensemble run.
+ :paramtype enable_vote_ensemble: bool
+ :keyword ensemble_model_download_timeout: During VotingEnsemble and StackEnsemble model
+ generation, multiple fitted models from the previous child runs are downloaded.
+ Configure this parameter with a value higher than 300 seconds if more time is needed.
+ :paramtype ensemble_model_download_timeout: ~datetime.timedelta
+ :keyword stack_ensemble_settings: Stack ensemble settings for stack ensemble run.
+ :paramtype stack_ensemble_settings:
+ ~azure.mgmt.machinelearningservices.models.StackEnsembleSettings
+ :keyword training_mode: Training mode. Setting it to 'auto' is currently the same as setting
+ it to 'non-distributed'; in the future it may result in mixed-mode or heuristics-based mode
+ selection. Default is 'auto'.
+ If 'Distributed', only distributed featurization is used and distributed algorithms are
+ chosen.
+ If 'NonDistributed', only non-distributed algorithms are chosen. Known values are: "Auto",
+ "Distributed", and "NonDistributed".
+ :paramtype training_mode: str or ~azure.mgmt.machinelearningservices.models.TrainingMode
+ :keyword allowed_training_algorithms: Allowed models for regression task.
+ :paramtype allowed_training_algorithms: list[str or
+ ~azure.mgmt.machinelearningservices.models.RegressionModels]
+ :keyword blocked_training_algorithms: Blocked models for regression task.
+ :paramtype blocked_training_algorithms: list[str or
+ ~azure.mgmt.machinelearningservices.models.RegressionModels]
"""
- super().__init__(tags=tags, location=location, **kwargs)
- self.identity = identity
- self.kind = kind
- self.sku = sku
- self.discovery_url = discovery_url
- self.intellectual_property_publisher = intellectual_property_publisher
- self.managed_resource_group = managed_resource_group
- self.ml_flow_registry_uri = ml_flow_registry_uri
- self.registry_private_endpoint_connections = registry_private_endpoint_connections
- self.public_network_access = public_network_access
- self.region_details = region_details
-
+ super().__init__(
+ enable_dnn_training=enable_dnn_training,
+ enable_model_explainability=enable_model_explainability,
+ enable_onnx_compatible_models=enable_onnx_compatible_models,
+ enable_stack_ensemble=enable_stack_ensemble,
+ enable_vote_ensemble=enable_vote_ensemble,
+ ensemble_model_download_timeout=ensemble_model_download_timeout,
+ stack_ensemble_settings=stack_ensemble_settings,
+ training_mode=training_mode,
+ **kwargs
+ )
+ self.allowed_training_algorithms = allowed_training_algorithms
+ self.blocked_training_algorithms = blocked_training_algorithms
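
A hedged sketch of overriding a few of the defaults above; the algorithm names are assumed members of the RegressionModels enum and are illustrative only.

# Sketch: constrain the sweep to two assumed RegressionModels values and disable stacking.
from azure.mgmt.machinelearningservices import models

training_settings = models.RegressionTrainingSettings(
    enable_onnx_compatible_models=True,
    enable_stack_ensemble=False,
    training_mode="NonDistributed",                           # documented known value
    allowed_training_algorithms=["LightGBM", "ElasticNet"],   # assumed enum member names
)
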
-class RegistryListCredentialsResult(_serialization.Model):
- """RegistryListCredentialsResult.
- Variables are only populated by the server, and will be ignored when sending a request.
+class RequestLogging(_serialization.Model):
+ """RequestLogging.
- :ivar location:
- :vartype location: str
- :ivar username:
- :vartype username: str
- :ivar passwords:
- :vartype passwords: list[~azure.mgmt.machinelearningservices.models.Password]
+ :ivar capture_headers: For payload logging, only the payload is collected by default. If
+ customers also want to collect specific headers, they can list them in captureHeaders so that
+ the backend collects those headers along with the payload.
+ :vartype capture_headers: list[str]
"""
- _validation = {
- "location": {"readonly": True},
- "username": {"readonly": True},
- }
-
_attribute_map = {
- "location": {"key": "location", "type": "str"},
- "username": {"key": "username", "type": "str"},
- "passwords": {"key": "passwords", "type": "[Password]"},
+ "capture_headers": {"key": "captureHeaders", "type": "[str]"},
}
- def __init__(self, *, passwords: Optional[List["_models.Password"]] = None, **kwargs: Any) -> None:
+ def __init__(self, *, capture_headers: Optional[List[str]] = None, **kwargs: Any) -> None:
"""
- :keyword passwords:
- :paramtype passwords: list[~azure.mgmt.machinelearningservices.models.Password]
+ :keyword capture_headers: For payload logging, only the payload is collected by default. If
+ customers also want to collect specific headers, they can list them in captureHeaders so that
+ the backend collects those headers along with the payload.
+ :paramtype capture_headers: list[str]
"""
super().__init__(**kwargs)
- self.location = None
- self.username = None
- self.passwords = passwords
-
+ self.capture_headers = capture_headers
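
A minimal sketch of opting specific headers into payload logging; the header names are illustrative.

from azure.mgmt.machinelearningservices import models

request_logging = models.RequestLogging(
    capture_headers=["x-request-id", "x-ms-client-request-id"],  # collected along with the payload
)
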
-class RegistryPartialManagedServiceIdentity(ManagedServiceIdentity):
- """Managed service identity (system assigned and/or user assigned identities).
-
- Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+class ResizeSchema(_serialization.Model):
+ """Schema for Compute Instance resize.
- :ivar principal_id: The service principal ID of the system assigned identity. This property
- will only be provided for a system assigned identity.
- :vartype principal_id: str
- :ivar tenant_id: The tenant ID of the system assigned identity. This property will only be
- provided for a system assigned identity.
- :vartype tenant_id: str
- :ivar type: Type of managed service identity (where both SystemAssigned and UserAssigned types
- are allowed). Required. Known values are: "None", "SystemAssigned", "UserAssigned", and
- "SystemAssigned,UserAssigned".
- :vartype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
- :ivar user_assigned_identities: The set of user assigned identities associated with the
- resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
- '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}.
- The dictionary values can be empty objects ({}) in requests.
- :vartype user_assigned_identities: dict[str,
- ~azure.mgmt.machinelearningservices.models.UserAssignedIdentity]
+ :ivar target_vm_size: The name of the virtual machine size.
+ :vartype target_vm_size: str
"""
- _validation = {
- "principal_id": {"readonly": True},
- "tenant_id": {"readonly": True},
- "type": {"required": True},
- }
-
_attribute_map = {
- "principal_id": {"key": "principalId", "type": "str"},
- "tenant_id": {"key": "tenantId", "type": "str"},
- "type": {"key": "type", "type": "str"},
- "user_assigned_identities": {"key": "userAssignedIdentities", "type": "{UserAssignedIdentity}"},
+ "target_vm_size": {"key": "targetVMSize", "type": "str"},
}
- def __init__(
- self,
- *,
- type: Union[str, "_models.ManagedServiceIdentityType"],
- user_assigned_identities: Optional[Dict[str, "_models.UserAssignedIdentity"]] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, target_vm_size: Optional[str] = None, **kwargs: Any) -> None:
"""
- :keyword type: Type of managed service identity (where both SystemAssigned and UserAssigned
- types are allowed). Required. Known values are: "None", "SystemAssigned", "UserAssigned", and
- "SystemAssigned,UserAssigned".
- :paramtype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
- :keyword user_assigned_identities: The set of user assigned identities associated with the
- resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
- '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}.
- The dictionary values can be empty objects ({}) in requests.
- :paramtype user_assigned_identities: dict[str,
- ~azure.mgmt.machinelearningservices.models.UserAssignedIdentity]
+ :keyword target_vm_size: The name of the virtual machine size.
+ :paramtype target_vm_size: str
"""
- super().__init__(type=type, user_assigned_identities=user_assigned_identities, **kwargs)
+ super().__init__(**kwargs)
+ self.target_vm_size = target_vm_size
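
A minimal sketch of the resize body; the VM size name is illustrative.

from azure.mgmt.machinelearningservices import models

resize_body = models.ResizeSchema(target_vm_size="Standard_DS3_v2")  # new size for the compute instance
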
-class RegistryPrivateEndpointConnection(_serialization.Model):
- """Private endpoint connection definition.
+class ResourceId(_serialization.Model):
+ """Represents a resource ID. For example, for a subnet, it is the resource URL for the subnet.
- :ivar id: This is the private endpoint connection name created on SRP
- Full resource id:
- /subscriptions/{subId}/resourceGroups/{rgName}/providers/Microsoft.MachineLearningServices/{resourceType}/{resourceName}/registryPrivateEndpointConnections/{peConnectionName}.
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar id: The ID of the resource. Required.
:vartype id: str
- :ivar location: Same as workspace location.
- :vartype location: str
- :ivar group_ids: The group ids.
- :vartype group_ids: list[str]
- :ivar private_endpoint: The PE network resource that is linked to this PE connection.
- :vartype private_endpoint: ~azure.mgmt.machinelearningservices.models.PrivateEndpointResource
- :ivar registry_private_link_service_connection_state: The connection state.
- :vartype registry_private_link_service_connection_state:
- ~azure.mgmt.machinelearningservices.models.RegistryPrivateLinkServiceConnectionState
- :ivar provisioning_state: One of null, "Succeeded", "Provisioning", "Failed". While not
- approved, it's null.
- :vartype provisioning_state: str
"""
+ _validation = {
+ "id": {"required": True},
+ }
+
_attribute_map = {
"id": {"key": "id", "type": "str"},
- "location": {"key": "location", "type": "str"},
- "group_ids": {"key": "properties.groupIds", "type": "[str]"},
- "private_endpoint": {"key": "properties.privateEndpoint", "type": "PrivateEndpointResource"},
- "registry_private_link_service_connection_state": {
- "key": "properties.registryPrivateLinkServiceConnectionState",
- "type": "RegistryPrivateLinkServiceConnectionState",
- },
- "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
}
- def __init__(
- self,
- *,
- id: Optional[str] = None, # pylint: disable=redefined-builtin
- location: Optional[str] = None,
- group_ids: Optional[List[str]] = None,
- private_endpoint: Optional["_models.PrivateEndpointResource"] = None,
- registry_private_link_service_connection_state: Optional[
- "_models.RegistryPrivateLinkServiceConnectionState"
- ] = None,
- provisioning_state: Optional[str] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, id: str, **kwargs: Any) -> None: # pylint: disable=redefined-builtin
"""
- :keyword id: This is the private endpoint connection name created on SRP
- Full resource id:
- /subscriptions/{subId}/resourceGroups/{rgName}/providers/Microsoft.MachineLearningServices/{resourceType}/{resourceName}/registryPrivateEndpointConnections/{peConnectionName}.
+ :keyword id: The ID of the resource. Required.
:paramtype id: str
- :keyword location: Same as workspace location.
- :paramtype location: str
- :keyword group_ids: The group ids.
- :paramtype group_ids: list[str]
- :keyword private_endpoint: The PE network resource that is linked to this PE connection.
- :paramtype private_endpoint: ~azure.mgmt.machinelearningservices.models.PrivateEndpointResource
- :keyword registry_private_link_service_connection_state: The connection state.
- :paramtype registry_private_link_service_connection_state:
- ~azure.mgmt.machinelearningservices.models.RegistryPrivateLinkServiceConnectionState
- :keyword provisioning_state: One of null, "Succeeded", "Provisioning", "Failed". While not
- approved, it's null.
- :paramtype provisioning_state: str
"""
super().__init__(**kwargs)
self.id = id
- self.location = location
- self.group_ids = group_ids
- self.private_endpoint = private_endpoint
- self.registry_private_link_service_connection_state = registry_private_link_service_connection_state
- self.provisioning_state = provisioning_state
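
A minimal sketch matching the subnet example in the docstring; the subscription and resource names are placeholders.

from azure.mgmt.machinelearningservices import models

subnet = models.ResourceId(
    id=(
        "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg"
        "/providers/Microsoft.Network/virtualNetworks/my-vnet/subnets/default"
    )
)
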
-class RegistryPrivateLinkServiceConnectionState(_serialization.Model):
- """The connection state.
+class ResourceName(_serialization.Model):
+ """The Resource Name.
- :ivar actions_required: Some RP chose "None". Other RPs use this for region expansion.
- :vartype actions_required: str
- :ivar description: User-defined message that, per NRP doc, may be used for approval-related
- message.
- :vartype description: str
- :ivar status: Connection status of the service consumer with the service provider. Known values
- are: "Approved", "Pending", "Rejected", and "Disconnected".
- :vartype status: str or
- ~azure.mgmt.machinelearningservices.models.EndpointServiceConnectionStatus
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar value: The name of the resource.
+ :vartype value: str
+ :ivar localized_value: The localized name of the resource.
+ :vartype localized_value: str
"""
+ _validation = {
+ "value": {"readonly": True},
+ "localized_value": {"readonly": True},
+ }
+
_attribute_map = {
- "actions_required": {"key": "actionsRequired", "type": "str"},
- "description": {"key": "description", "type": "str"},
- "status": {"key": "status", "type": "str"},
+ "value": {"key": "value", "type": "str"},
+ "localized_value": {"key": "localizedValue", "type": "str"},
}
- def __init__(
- self,
- *,
- actions_required: Optional[str] = None,
- description: Optional[str] = None,
- status: Optional[Union[str, "_models.EndpointServiceConnectionStatus"]] = None,
- **kwargs: Any
- ) -> None:
- """
- :keyword actions_required: Some RP chose "None". Other RPs use this for region expansion.
- :paramtype actions_required: str
- :keyword description: User-defined message that, per NRP doc, may be used for approval-related
- message.
- :paramtype description: str
- :keyword status: Connection status of the service consumer with the service provider. Known
- values are: "Approved", "Pending", "Rejected", and "Disconnected".
- :paramtype status: str or
- ~azure.mgmt.machinelearningservices.models.EndpointServiceConnectionStatus
- """
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
super().__init__(**kwargs)
- self.actions_required = actions_required
- self.description = description
- self.status = status
+ self.value = None
+ self.localized_value = None
-class RegistryRegionArmDetails(_serialization.Model):
- """Details for each region the registry is in.
+class ResourceQuota(_serialization.Model):
+ """The quota assigned to a resource.
- :ivar acr_details: List of ACR accounts.
- :vartype acr_details: list[~azure.mgmt.machinelearningservices.models.AcrDetails]
- :ivar location: The location where the registry exists.
- :vartype location: str
- :ivar storage_account_details: List of storage accounts.
- :vartype storage_account_details:
- list[~azure.mgmt.machinelearningservices.models.StorageAccountDetails]
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar id: Specifies the resource ID.
+ :vartype id: str
+ :ivar aml_workspace_location: Region of the AML workspace in the id.
+ :vartype aml_workspace_location: str
+ :ivar type: Specifies the resource type.
+ :vartype type: str
+ :ivar name: Name of the resource.
+ :vartype name: ~azure.mgmt.machinelearningservices.models.ResourceName
+ :ivar limit: The maximum permitted quota of the resource.
+ :vartype limit: int
+ :ivar unit: An enum describing the unit of quota measurement. "Count"
+ :vartype unit: str or ~azure.mgmt.machinelearningservices.models.QuotaUnit
"""
+ _validation = {
+ "id": {"readonly": True},
+ "aml_workspace_location": {"readonly": True},
+ "type": {"readonly": True},
+ "name": {"readonly": True},
+ "limit": {"readonly": True},
+ "unit": {"readonly": True},
+ }
+
_attribute_map = {
- "acr_details": {"key": "acrDetails", "type": "[AcrDetails]"},
- "location": {"key": "location", "type": "str"},
- "storage_account_details": {"key": "storageAccountDetails", "type": "[StorageAccountDetails]"},
+ "id": {"key": "id", "type": "str"},
+ "aml_workspace_location": {"key": "amlWorkspaceLocation", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "name": {"key": "name", "type": "ResourceName"},
+ "limit": {"key": "limit", "type": "int"},
+ "unit": {"key": "unit", "type": "str"},
}
- def __init__(
- self,
- *,
- acr_details: Optional[List["_models.AcrDetails"]] = None,
- location: Optional[str] = None,
- storage_account_details: Optional[List["_models.StorageAccountDetails"]] = None,
- **kwargs: Any
- ) -> None:
- """
- :keyword acr_details: List of ACR accounts.
- :paramtype acr_details: list[~azure.mgmt.machinelearningservices.models.AcrDetails]
- :keyword location: The location where the registry exists.
- :paramtype location: str
- :keyword storage_account_details: List of storage accounts.
- :paramtype storage_account_details:
- list[~azure.mgmt.machinelearningservices.models.StorageAccountDetails]
- """
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
super().__init__(**kwargs)
- self.acr_details = acr_details
- self.location = location
- self.storage_account_details = storage_account_details
+ self.id = None
+ self.aml_workspace_location = None
+ self.type = None
+ self.name = None
+ self.limit = None
+ self.unit = None
-class RegistryTrackedResourceArmPaginatedResult(_serialization.Model):
- """A paginated list of Registry entities.
+class RollingInputData(MonitoringInputDataBase):
+ """Rolling input data definition.
- :ivar next_link: The link to the next page of Registry objects. If null, there are no
- additional pages.
- :vartype next_link: str
- :ivar value: An array of objects of type Registry.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.Registry]
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar columns: Mapping of column names to special uses.
+ :vartype columns: dict[str, str]
+ :ivar data_context: The context metadata of the data source.
+ :vartype data_context: str
+ :ivar input_data_type: [Required] Specifies the type of signal to monitor. Required. Known
+ values are: "Static", "Rolling", and "Fixed".
+ :vartype input_data_type: str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringInputDataType
+ :ivar job_input_type: [Required] Specifies the type of job. Required. Known values are:
+ "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
+ "triton_model".
+ :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
+ :ivar uri: [Required] Input Asset URI. Required.
+ :vartype uri: str
+ :ivar preprocessing_component_id: The ARM resource ID of the component resource used to
+ preprocess the data.
+ :vartype preprocessing_component_id: str
+ :ivar window_offset: [Required] The time offset between the end of the data window and the
+ monitor's current run time. Required.
+ :vartype window_offset: ~datetime.timedelta
+ :ivar window_size: [Required] The size of the trailing data window. Required.
+ :vartype window_size: ~datetime.timedelta
"""
+ _validation = {
+ "input_data_type": {"required": True},
+ "job_input_type": {"required": True},
+ "uri": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "window_offset": {"required": True},
+ "window_size": {"required": True},
+ }
+
_attribute_map = {
- "next_link": {"key": "nextLink", "type": "str"},
- "value": {"key": "value", "type": "[Registry]"},
+ "columns": {"key": "columns", "type": "{str}"},
+ "data_context": {"key": "dataContext", "type": "str"},
+ "input_data_type": {"key": "inputDataType", "type": "str"},
+ "job_input_type": {"key": "jobInputType", "type": "str"},
+ "uri": {"key": "uri", "type": "str"},
+ "preprocessing_component_id": {"key": "preprocessingComponentId", "type": "str"},
+ "window_offset": {"key": "windowOffset", "type": "duration"},
+ "window_size": {"key": "windowSize", "type": "duration"},
}
def __init__(
- self, *, next_link: Optional[str] = None, value: Optional[List["_models.Registry"]] = None, **kwargs: Any
+ self,
+ *,
+ job_input_type: Union[str, "_models.JobInputType"],
+ uri: str,
+ window_offset: datetime.timedelta,
+ window_size: datetime.timedelta,
+ columns: Optional[Dict[str, str]] = None,
+ data_context: Optional[str] = None,
+ preprocessing_component_id: Optional[str] = None,
+ **kwargs: Any
) -> None:
"""
- :keyword next_link: The link to the next page of Registry objects. If null, there are no
- additional pages.
- :paramtype next_link: str
- :keyword value: An array of objects of type Registry.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.Registry]
- """
- super().__init__(**kwargs)
- self.next_link = next_link
- self.value = value
+ :keyword columns: Mapping of column names to special uses.
+ :paramtype columns: dict[str, str]
+ :keyword data_context: The context metadata of the data source.
+ :paramtype data_context: str
+ :keyword job_input_type: [Required] Specifies the type of job. Required. Known values are:
+ "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
+ "triton_model".
+ :paramtype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
+ :keyword uri: [Required] Input Asset URI. Required.
+ :paramtype uri: str
+ :keyword preprocessing_component_id: The ARM resource ID of the component resource used to
+ preprocess the data.
+ :paramtype preprocessing_component_id: str
+ :keyword window_offset: [Required] The time offset between the end of the data window and the
+ monitor's current run time. Required.
+ :paramtype window_offset: ~datetime.timedelta
+ :keyword window_size: [Required] The size of the trailing data window. Required.
+ :paramtype window_size: ~datetime.timedelta
+ """
+ super().__init__(columns=columns, data_context=data_context, job_input_type=job_input_type, uri=uri, **kwargs)
+ self.input_data_type: str = "Rolling"
+ self.preprocessing_component_id = preprocessing_component_id
+ self.window_offset = window_offset
+ self.window_size = window_size
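
A hedged sketch of a rolling window over production data, using the required duration fields above; the MLTable URI and data_context value are illustrative.

import datetime

from azure.mgmt.machinelearningservices import models

production_data = models.RollingInputData(
    job_input_type="mltable",                     # documented known value
    uri="azureml://datastores/workspaceblobstore/paths/production-inference-data/",
    window_size=datetime.timedelta(days=7),       # trailing 7-day data window
    window_offset=datetime.timedelta(days=1),     # window ends 1 day before the monitor run
    data_context="model_inputs",                  # free-form context metadata
)
assert production_data.input_data_type == "Rolling"
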
-class Regression(TableVertical, AutoMLVertical): # pylint: disable=too-many-instance-attributes
- """Regression task in AutoML Table vertical.
+class Route(_serialization.Model):
+ """Route.
All required parameters must be populated in order to send to Azure.
- :ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
- "Warning", "Error", and "Critical".
- :vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
- :ivar target_column_name: Target column name: This is prediction values column.
- Also known as label column name in context of classification tasks.
- :vartype target_column_name: str
- :ivar task_type: [Required] Task type for AutoMLJob. Required. Known values are:
- "Classification", "Regression", "Forecasting", "ImageClassification",
- "ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
- "TextClassification", "TextClassificationMultilabel", and "TextNER".
- :vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
- :ivar training_data: [Required] Training data input. Required.
- :vartype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar cv_split_column_names: Columns to use for CVSplit data.
- :vartype cv_split_column_names: list[str]
- :ivar featurization_settings: Featurization inputs needed for AutoML job.
- :vartype featurization_settings:
- ~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
- :ivar limit_settings: Execution constraints for AutoMLJob.
- :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
- :ivar n_cross_validations: Number of cross validation folds to be applied on training dataset
- when validation dataset is not provided.
- :vartype n_cross_validations: ~azure.mgmt.machinelearningservices.models.NCrossValidations
- :ivar test_data: Test data input.
- :vartype test_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar test_data_size: The fraction of test dataset that needs to be set aside for validation
- purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :vartype test_data_size: float
- :ivar validation_data: Validation data inputs.
- :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :vartype validation_data_size: float
- :ivar weight_column_name: The name of the sample weight column. Automated ML supports a
- weighted column as an input, causing rows in the data to be weighted up or down.
- :vartype weight_column_name: str
- :ivar primary_metric: Primary metric for regression task. Known values are:
- "SpearmanCorrelation", "NormalizedRootMeanSquaredError", "R2Score", and
- "NormalizedMeanAbsoluteError".
- :vartype primary_metric: str or
- ~azure.mgmt.machinelearningservices.models.RegressionPrimaryMetrics
- :ivar training_settings: Inputs for training phase for an AutoML Job.
- :vartype training_settings:
- ~azure.mgmt.machinelearningservices.models.RegressionTrainingSettings
+ :ivar path: [Required] The path for the route. Required.
+ :vartype path: str
+ :ivar port: [Required] The port for the route. Required.
+ :vartype port: int
"""
_validation = {
- "task_type": {"required": True},
- "training_data": {"required": True},
+ "path": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "port": {"required": True},
}
_attribute_map = {
- "log_verbosity": {"key": "logVerbosity", "type": "str"},
- "target_column_name": {"key": "targetColumnName", "type": "str"},
- "task_type": {"key": "taskType", "type": "str"},
- "training_data": {"key": "trainingData", "type": "MLTableJobInput"},
- "cv_split_column_names": {"key": "cvSplitColumnNames", "type": "[str]"},
- "featurization_settings": {"key": "featurizationSettings", "type": "TableVerticalFeaturizationSettings"},
- "limit_settings": {"key": "limitSettings", "type": "TableVerticalLimitSettings"},
- "n_cross_validations": {"key": "nCrossValidations", "type": "NCrossValidations"},
- "test_data": {"key": "testData", "type": "MLTableJobInput"},
- "test_data_size": {"key": "testDataSize", "type": "float"},
- "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
- "validation_data_size": {"key": "validationDataSize", "type": "float"},
- "weight_column_name": {"key": "weightColumnName", "type": "str"},
- "primary_metric": {"key": "primaryMetric", "type": "str"},
- "training_settings": {"key": "trainingSettings", "type": "RegressionTrainingSettings"},
+ "path": {"key": "path", "type": "str"},
+ "port": {"key": "port", "type": "int"},
}
- def __init__(
- self,
- *,
- training_data: "_models.MLTableJobInput",
- log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
- target_column_name: Optional[str] = None,
- cv_split_column_names: Optional[List[str]] = None,
- featurization_settings: Optional["_models.TableVerticalFeaturizationSettings"] = None,
- limit_settings: Optional["_models.TableVerticalLimitSettings"] = None,
- n_cross_validations: Optional["_models.NCrossValidations"] = None,
- test_data: Optional["_models.MLTableJobInput"] = None,
- test_data_size: Optional[float] = None,
- validation_data: Optional["_models.MLTableJobInput"] = None,
- validation_data_size: Optional[float] = None,
- weight_column_name: Optional[str] = None,
- primary_metric: Optional[Union[str, "_models.RegressionPrimaryMetrics"]] = None,
- training_settings: Optional["_models.RegressionTrainingSettings"] = None,
- **kwargs: Any
- ) -> None:
- """
- :keyword log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
- "Warning", "Error", and "Critical".
- :paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
- :keyword target_column_name: Target column name: This is prediction values column.
- Also known as label column name in context of classification tasks.
- :paramtype target_column_name: str
- :keyword training_data: [Required] Training data input. Required.
- :paramtype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword cv_split_column_names: Columns to use for CVSplit data.
- :paramtype cv_split_column_names: list[str]
- :keyword featurization_settings: Featurization inputs needed for AutoML job.
- :paramtype featurization_settings:
- ~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
- :keyword limit_settings: Execution constraints for AutoMLJob.
- :paramtype limit_settings:
- ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
- :keyword n_cross_validations: Number of cross validation folds to be applied on training
- dataset
- when validation dataset is not provided.
- :paramtype n_cross_validations: ~azure.mgmt.machinelearningservices.models.NCrossValidations
- :keyword test_data: Test data input.
- :paramtype test_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword test_data_size: The fraction of test dataset that needs to be set aside for validation
- purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :paramtype test_data_size: float
- :keyword validation_data: Validation data inputs.
- :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :paramtype validation_data_size: float
- :keyword weight_column_name: The name of the sample weight column. Automated ML supports a
- weighted column as an input, causing rows in the data to be weighted up or down.
- :paramtype weight_column_name: str
- :keyword primary_metric: Primary metric for regression task. Known values are:
- "SpearmanCorrelation", "NormalizedRootMeanSquaredError", "R2Score", and
- "NormalizedMeanAbsoluteError".
- :paramtype primary_metric: str or
- ~azure.mgmt.machinelearningservices.models.RegressionPrimaryMetrics
- :keyword training_settings: Inputs for training phase for an AutoML Job.
- :paramtype training_settings:
- ~azure.mgmt.machinelearningservices.models.RegressionTrainingSettings
+ def __init__(self, *, path: str, port: int, **kwargs: Any) -> None:
"""
- super().__init__(
- cv_split_column_names=cv_split_column_names,
- featurization_settings=featurization_settings,
- limit_settings=limit_settings,
- n_cross_validations=n_cross_validations,
- test_data=test_data,
- test_data_size=test_data_size,
- validation_data=validation_data,
- validation_data_size=validation_data_size,
- weight_column_name=weight_column_name,
- log_verbosity=log_verbosity,
- target_column_name=target_column_name,
- training_data=training_data,
- **kwargs
- )
- self.log_verbosity = log_verbosity
- self.target_column_name = target_column_name
- self.task_type: str = "Regression"
- self.training_data = training_data
- self.primary_metric = primary_metric
- self.training_settings = training_settings
- self.cv_split_column_names = cv_split_column_names
- self.featurization_settings = featurization_settings
- self.limit_settings = limit_settings
- self.n_cross_validations = n_cross_validations
- self.test_data = test_data
- self.test_data_size = test_data_size
- self.validation_data = validation_data
- self.validation_data_size = validation_data_size
- self.weight_column_name = weight_column_name
+ :keyword path: [Required] The path for the route. Required.
+ :paramtype path: str
+ :keyword port: [Required] The port for the route. Required.
+ :paramtype port: int
+ """
+ super().__init__(**kwargs)
+ self.path = path
+ self.port = port
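# Editor's sketch (not part of the generated diff): constructing the regenerated Route model.
# Both keywords are required per its validation map; the path and port values are placeholders.
from azure.mgmt.machinelearningservices import models

route = models.Route(path="/score", port=8080)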
-class RegressionTrainingSettings(TrainingSettings):
- """Regression Training related configuration.
+class SASAuthTypeWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2):
+ """SASAuthTypeWorkspaceConnectionProperties.
- :ivar enable_dnn_training: Enable recommendation of DNN models.
- :vartype enable_dnn_training: bool
- :ivar enable_model_explainability: Flag to turn on explainability on best model.
- :vartype enable_model_explainability: bool
- :ivar enable_onnx_compatible_models: Flag for enabling onnx compatible models.
- :vartype enable_onnx_compatible_models: bool
- :ivar enable_stack_ensemble: Enable stack ensemble run.
- :vartype enable_stack_ensemble: bool
- :ivar enable_vote_ensemble: Enable voting ensemble run.
- :vartype enable_vote_ensemble: bool
- :ivar ensemble_model_download_timeout: During VotingEnsemble and StackEnsemble model
- generation, multiple fitted models from the previous child runs are downloaded.
- Configure this parameter with a higher value than 300 secs, if more time is needed.
- :vartype ensemble_model_download_timeout: ~datetime.timedelta
- :ivar stack_ensemble_settings: Stack ensemble settings for stack ensemble run.
- :vartype stack_ensemble_settings:
- ~azure.mgmt.machinelearningservices.models.StackEnsembleSettings
- :ivar allowed_training_algorithms: Allowed models for regression task.
- :vartype allowed_training_algorithms: list[str or
- ~azure.mgmt.machinelearningservices.models.RegressionModels]
- :ivar blocked_training_algorithms: Blocked models for regression task.
- :vartype blocked_training_algorithms: list[str or
- ~azure.mgmt.machinelearningservices.models.RegressionModels]
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar auth_type: Authentication type of the connection target. Required. Known values are:
+ "PAT", "ManagedIdentity", "UsernamePassword", "None", "SAS", "ServicePrincipal", "AccessKey",
+ "ApiKey", and "CustomKeys".
+ :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
+ :ivar category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI",
+ "CognitiveSearch", "CognitiveService", and "CustomKeys".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :ivar created_by_workspace_arm_id: The arm id of the workspace which created this connection.
+ :vartype created_by_workspace_arm_id: str
+ :ivar expiry_time:
+ :vartype expiry_time: ~datetime.datetime
+ :ivar is_shared_to_all: Whether this connection will be shared to all the project workspaces
+ under the hub.
+ :vartype is_shared_to_all: bool
+ :ivar metadata: Any object.
+ :vartype metadata: JSON
+ :ivar target:
+ :vartype target: str
+ :ivar credentials:
+ :vartype credentials:
+ ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionSharedAccessSignature
"""
+ _validation = {
+ "auth_type": {"required": True},
+ "created_by_workspace_arm_id": {"readonly": True},
+ }
+
_attribute_map = {
- "enable_dnn_training": {"key": "enableDnnTraining", "type": "bool"},
- "enable_model_explainability": {"key": "enableModelExplainability", "type": "bool"},
- "enable_onnx_compatible_models": {"key": "enableOnnxCompatibleModels", "type": "bool"},
- "enable_stack_ensemble": {"key": "enableStackEnsemble", "type": "bool"},
- "enable_vote_ensemble": {"key": "enableVoteEnsemble", "type": "bool"},
- "ensemble_model_download_timeout": {"key": "ensembleModelDownloadTimeout", "type": "duration"},
- "stack_ensemble_settings": {"key": "stackEnsembleSettings", "type": "StackEnsembleSettings"},
- "allowed_training_algorithms": {"key": "allowedTrainingAlgorithms", "type": "[str]"},
- "blocked_training_algorithms": {"key": "blockedTrainingAlgorithms", "type": "[str]"},
+ "auth_type": {"key": "authType", "type": "str"},
+ "category": {"key": "category", "type": "str"},
+ "created_by_workspace_arm_id": {"key": "createdByWorkspaceArmId", "type": "str"},
+ "expiry_time": {"key": "expiryTime", "type": "iso-8601"},
+ "is_shared_to_all": {"key": "isSharedToAll", "type": "bool"},
+ "metadata": {"key": "metadata", "type": "object"},
+ "target": {"key": "target", "type": "str"},
+ "credentials": {"key": "credentials", "type": "WorkspaceConnectionSharedAccessSignature"},
}
def __init__(
self,
*,
- enable_dnn_training: bool = False,
- enable_model_explainability: bool = True,
- enable_onnx_compatible_models: bool = False,
- enable_stack_ensemble: bool = True,
- enable_vote_ensemble: bool = True,
- ensemble_model_download_timeout: datetime.timedelta = "PT5M",
- stack_ensemble_settings: Optional["_models.StackEnsembleSettings"] = None,
- allowed_training_algorithms: Optional[List[Union[str, "_models.RegressionModels"]]] = None,
- blocked_training_algorithms: Optional[List[Union[str, "_models.RegressionModels"]]] = None,
+ category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
+ expiry_time: Optional[datetime.datetime] = None,
+ is_shared_to_all: Optional[bool] = None,
+ metadata: Optional[JSON] = None,
+ target: Optional[str] = None,
+ credentials: Optional["_models.WorkspaceConnectionSharedAccessSignature"] = None,
**kwargs: Any
) -> None:
"""
- :keyword enable_dnn_training: Enable recommendation of DNN models.
- :paramtype enable_dnn_training: bool
- :keyword enable_model_explainability: Flag to turn on explainability on best model.
- :paramtype enable_model_explainability: bool
- :keyword enable_onnx_compatible_models: Flag for enabling onnx compatible models.
- :paramtype enable_onnx_compatible_models: bool
- :keyword enable_stack_ensemble: Enable stack ensemble run.
- :paramtype enable_stack_ensemble: bool
- :keyword enable_vote_ensemble: Enable voting ensemble run.
- :paramtype enable_vote_ensemble: bool
- :keyword ensemble_model_download_timeout: During VotingEnsemble and StackEnsemble model
- generation, multiple fitted models from the previous child runs are downloaded.
- Configure this parameter with a higher value than 300 secs, if more time is needed.
- :paramtype ensemble_model_download_timeout: ~datetime.timedelta
- :keyword stack_ensemble_settings: Stack ensemble settings for stack ensemble run.
- :paramtype stack_ensemble_settings:
- ~azure.mgmt.machinelearningservices.models.StackEnsembleSettings
- :keyword allowed_training_algorithms: Allowed models for regression task.
- :paramtype allowed_training_algorithms: list[str or
- ~azure.mgmt.machinelearningservices.models.RegressionModels]
- :keyword blocked_training_algorithms: Blocked models for regression task.
- :paramtype blocked_training_algorithms: list[str or
- ~azure.mgmt.machinelearningservices.models.RegressionModels]
+ :keyword category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI",
+ "CognitiveSearch", "CognitiveService", and "CustomKeys".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :keyword expiry_time:
+ :paramtype expiry_time: ~datetime.datetime
+ :keyword is_shared_to_all: Whether this connection will be shared to all the project workspaces
+ under the hub.
+ :paramtype is_shared_to_all: bool
+ :keyword metadata: Any object.
+ :paramtype metadata: JSON
+ :keyword target:
+ :paramtype target: str
+ :keyword credentials:
+ :paramtype credentials:
+ ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionSharedAccessSignature
"""
super().__init__(
- enable_dnn_training=enable_dnn_training,
- enable_model_explainability=enable_model_explainability,
- enable_onnx_compatible_models=enable_onnx_compatible_models,
- enable_stack_ensemble=enable_stack_ensemble,
- enable_vote_ensemble=enable_vote_ensemble,
- ensemble_model_download_timeout=ensemble_model_download_timeout,
- stack_ensemble_settings=stack_ensemble_settings,
+ category=category,
+ expiry_time=expiry_time,
+ is_shared_to_all=is_shared_to_all,
+ metadata=metadata,
+ target=target,
**kwargs
)
- self.allowed_training_algorithms = allowed_training_algorithms
- self.blocked_training_algorithms = blocked_training_algorithms
+ self.auth_type: str = "SAS"
+ self.credentials = credentials
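# Editor's sketch, illustrative only: building the SAS-auth connection properties added above.
# The `sas` keyword on WorkspaceConnectionSharedAccessSignature is assumed from that referenced
# model (it is not shown in this hunk), and the target/token values are placeholders.
from azure.mgmt.machinelearningservices import models

connection_props = models.SASAuthTypeWorkspaceConnectionProperties(
    category="ContainerRegistry",
    target="https://example.azurecr.io",
    credentials=models.WorkspaceConnectionSharedAccessSignature(sas="<sas-token>"),
)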
-class ResourceId(_serialization.Model):
- """Represents a resource ID. For example, for a subnet, it is the resource URL for the subnet.
+class SASCredentialDto(PendingUploadCredentialDto):
+ """SASCredentialDto.
All required parameters must be populated in order to send to Azure.
- :ivar id: The ID of the resource. Required.
- :vartype id: str
+ :ivar credential_type: [Required] Credential type used to authenticate with storage.
+ Required. "SAS"
+ :vartype credential_type: str or
+ ~azure.mgmt.machinelearningservices.models.PendingUploadCredentialType
+ :ivar sas_uri: Full SAS Uri, including the storage, container/blob path and SAS token.
+ :vartype sas_uri: str
"""
_validation = {
- "id": {"required": True},
+ "credential_type": {"required": True},
}
_attribute_map = {
- "id": {"key": "id", "type": "str"},
+ "credential_type": {"key": "credentialType", "type": "str"},
+ "sas_uri": {"key": "sasUri", "type": "str"},
}
- def __init__(self, *, id: str, **kwargs: Any) -> None: # pylint: disable=redefined-builtin
+ def __init__(self, *, sas_uri: Optional[str] = None, **kwargs: Any) -> None:
"""
- :keyword id: The ID of the resource. Required.
- :paramtype id: str
+ :keyword sas_uri: Full SAS Uri, including the storage, container/blob path and SAS token.
+ :paramtype sas_uri: str
"""
super().__init__(**kwargs)
- self.id = id
+ self.credential_type: str = "SAS"
+ self.sas_uri = sas_uri
-class ResourceName(_serialization.Model):
- """The Resource Name.
+class SasDatastoreCredentials(DatastoreCredentials):
+ """SAS datastore credentials configuration.
- Variables are only populated by the server, and will be ignored when sending a request.
+ All required parameters must be populated in order to send to Azure.
- :ivar value: The name of the resource.
- :vartype value: str
- :ivar localized_value: The localized name of the resource.
- :vartype localized_value: str
+ :ivar credentials_type: [Required] Credential type used to authenticate with storage.
+ Required. Known values are: "AccountKey", "Certificate", "None", "Sas", "ServicePrincipal",
+ "KerberosKeytab", and "KerberosPassword".
+ :vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
+ :ivar secrets: [Required] Storage container secrets. Required.
+ :vartype secrets: ~azure.mgmt.machinelearningservices.models.SasDatastoreSecrets
"""
_validation = {
- "value": {"readonly": True},
- "localized_value": {"readonly": True},
+ "credentials_type": {"required": True},
+ "secrets": {"required": True},
+ }
+
+ _attribute_map = {
+ "credentials_type": {"key": "credentialsType", "type": "str"},
+ "secrets": {"key": "secrets", "type": "SasDatastoreSecrets"},
+ }
+
+ def __init__(self, *, secrets: "_models.SasDatastoreSecrets", **kwargs: Any) -> None:
+ """
+ :keyword secrets: [Required] Storage container secrets. Required.
+ :paramtype secrets: ~azure.mgmt.machinelearningservices.models.SasDatastoreSecrets
+ """
+ super().__init__(**kwargs)
+ self.credentials_type: str = "Sas"
+ self.secrets = secrets
+
+
+class SasDatastoreSecrets(DatastoreSecrets):
+ """Datastore SAS secrets.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar secrets_type: [Required] Credential type used to authenticate with storage. Required.
+ Known values are: "AccountKey", "Certificate", "Sas", "ServicePrincipal", "KerberosPassword",
+ and "KerberosKeytab".
+ :vartype secrets_type: str or ~azure.mgmt.machinelearningservices.models.SecretsType
+ :ivar sas_token: Storage container SAS token.
+ :vartype sas_token: str
+ """
+
+ _validation = {
+ "secrets_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "secrets_type": {"key": "secretsType", "type": "str"},
+ "sas_token": {"key": "sasToken", "type": "str"},
+ }
+
+ def __init__(self, *, sas_token: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword sas_token: Storage container SAS token.
+ :paramtype sas_token: str
+ """
+ super().__init__(**kwargs)
+ self.secrets_type: str = "Sas"
+ self.sas_token = sas_token
+
+
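# Editor's sketch (not part of the generated diff): wiring the two SAS datastore models added
# above together, as a datastore create/update payload would; the token value is a placeholder.
from azure.mgmt.machinelearningservices import models

sas_credentials = models.SasDatastoreCredentials(
    secrets=models.SasDatastoreSecrets(sas_token="<container-sas-token>"),
)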
+class ScaleSettings(_serialization.Model):
+ """scale settings for AML Compute.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar max_node_count: Max number of nodes to use. Required.
+ :vartype max_node_count: int
+ :ivar min_node_count: Min number of nodes to use.
+ :vartype min_node_count: int
+ :ivar node_idle_time_before_scale_down: Node Idle Time before scaling down amlCompute. This
+ string needs to be in ISO 8601 duration format (for example, "PT15M").
+ :vartype node_idle_time_before_scale_down: ~datetime.timedelta
+ """
+
+ _validation = {
+ "max_node_count": {"required": True},
+ }
+
+ _attribute_map = {
+ "max_node_count": {"key": "maxNodeCount", "type": "int"},
+ "min_node_count": {"key": "minNodeCount", "type": "int"},
+ "node_idle_time_before_scale_down": {"key": "nodeIdleTimeBeforeScaleDown", "type": "duration"},
}
+ def __init__(
+ self,
+ *,
+ max_node_count: int,
+ min_node_count: int = 0,
+ node_idle_time_before_scale_down: Optional[datetime.timedelta] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword max_node_count: Max number of nodes to use. Required.
+ :paramtype max_node_count: int
+ :keyword min_node_count: Min number of nodes to use.
+ :paramtype min_node_count: int
+ :keyword node_idle_time_before_scale_down: Node Idle Time before scaling down amlCompute. This
+ string needs to be in ISO 8601 duration format (for example, "PT15M").
+ :paramtype node_idle_time_before_scale_down: ~datetime.timedelta
+ """
+ super().__init__(**kwargs)
+ self.max_node_count = max_node_count
+ self.min_node_count = min_node_count
+ self.node_idle_time_before_scale_down = node_idle_time_before_scale_down
+
+
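# Editor's sketch, illustrative only: ScaleSettings mirrors the AmlCompute autoscale contract;
# node_idle_time_before_scale_down serializes as an ISO 8601 duration, so a timedelta works.
import datetime

from azure.mgmt.machinelearningservices import models

scale = models.ScaleSettings(
    max_node_count=4,
    min_node_count=0,
    node_idle_time_before_scale_down=datetime.timedelta(minutes=15),
)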
+class ScaleSettingsInformation(_serialization.Model):
+ """Desired scale settings for the amlCompute.
+
+ :ivar scale_settings: scale settings for AML Compute.
+ :vartype scale_settings: ~azure.mgmt.machinelearningservices.models.ScaleSettings
+ """
+
_attribute_map = {
- "value": {"key": "value", "type": "str"},
- "localized_value": {"key": "localizedValue", "type": "str"},
+ "scale_settings": {"key": "scaleSettings", "type": "ScaleSettings"},
}
- def __init__(self, **kwargs: Any) -> None:
- """ """
+ def __init__(self, *, scale_settings: Optional["_models.ScaleSettings"] = None, **kwargs: Any) -> None:
+ """
+ :keyword scale_settings: scale settings for AML Compute.
+ :paramtype scale_settings: ~azure.mgmt.machinelearningservices.models.ScaleSettings
+ """
super().__init__(**kwargs)
- self.value = None
- self.localized_value = None
+ self.scale_settings = scale_settings
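# Editor's sketch: ScaleSettingsInformation is the thin wrapper that compute/quota update paths
# expect around ScaleSettings (a hedged reading of the model shapes, not a documented call).
from azure.mgmt.machinelearningservices import models

scale_info = models.ScaleSettingsInformation(
    scale_settings=models.ScaleSettings(max_node_count=2, min_node_count=0),
)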
-class ResourceQuota(_serialization.Model):
- """The quota assigned to a resource.
+class Schedule(ProxyResource):
+ """Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
- :ivar id: Specifies the resource ID.
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
- :ivar aml_workspace_location: Region of the AML workspace in the id.
- :vartype aml_workspace_location: str
- :ivar type: Specifies the resource type.
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
:vartype type: str
- :ivar name: Name of the resource.
- :vartype name: ~azure.mgmt.machinelearningservices.models.ResourceName
- :ivar limit: The maximum permitted quota of the resource.
- :vartype limit: int
- :ivar unit: An enum describing the unit of quota measurement. "Count"
- :vartype unit: str or ~azure.mgmt.machinelearningservices.models.QuotaUnit
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.ScheduleProperties
"""
_validation = {
"id": {"readonly": True},
- "aml_workspace_location": {"readonly": True},
- "type": {"readonly": True},
"name": {"readonly": True},
- "limit": {"readonly": True},
- "unit": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "properties": {"required": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
- "aml_workspace_location": {"key": "amlWorkspaceLocation", "type": "str"},
+ "name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
- "name": {"key": "name", "type": "ResourceName"},
- "limit": {"key": "limit", "type": "int"},
- "unit": {"key": "unit", "type": "str"},
- }
-
- def __init__(self, **kwargs: Any) -> None:
- """ """
- super().__init__(**kwargs)
- self.id = None
- self.aml_workspace_location = None
- self.type = None
- self.name = None
- self.limit = None
- self.unit = None
-
-
-class Route(_serialization.Model):
- """Route.
-
- All required parameters must be populated in order to send to Azure.
-
- :ivar path: [Required] The path for the route. Required.
- :vartype path: str
- :ivar port: [Required] The port for the route. Required.
- :vartype port: int
- """
-
- _validation = {
- "path": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
- "port": {"required": True},
- }
-
- _attribute_map = {
- "path": {"key": "path", "type": "str"},
- "port": {"key": "port", "type": "int"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "properties": {"key": "properties", "type": "ScheduleProperties"},
}
- def __init__(self, *, path: str, port: int, **kwargs: Any) -> None:
+ def __init__(self, *, properties: "_models.ScheduleProperties", **kwargs: Any) -> None:
"""
- :keyword path: [Required] The path for the route. Required.
- :paramtype path: str
- :keyword port: [Required] The port for the route. Required.
- :paramtype port: int
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.ScheduleProperties
"""
super().__init__(**kwargs)
- self.path = path
- self.port = port
-
+ self.properties = properties
-class SASAuthTypeWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2):
- """SASAuthTypeWorkspaceConnectionProperties.
- All required parameters must be populated in order to send to Azure.
+class ScheduleBase(_serialization.Model):
+ """ScheduleBase.
- :ivar auth_type: Authentication type of the connection target. Required. Known values are:
- "PAT", "ManagedIdentity", "UsernamePassword", "None", and "SAS".
- :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
- :ivar category: Category of the connection. Known values are: "PythonFeed",
- "ContainerRegistry", and "Git".
- :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
- :ivar target:
- :vartype target: str
- :ivar value: Value details of the workspace connection.
- :vartype value: str
- :ivar value_format: format for the workspace connection value. "JSON"
- :vartype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
- :ivar credentials:
- :vartype credentials:
- ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionSharedAccessSignature
+ :ivar id: A system assigned id for the schedule.
+ :vartype id: str
+ :ivar provisioning_status: The current deployment state of schedule. Known values are:
+ "Completed", "Provisioning", and "Failed".
+ :vartype provisioning_status: str or
+ ~azure.mgmt.machinelearningservices.models.ScheduleProvisioningState
+ :ivar status: Is the schedule enabled or disabled? Known values are: "Enabled" and "Disabled".
+ :vartype status: str or ~azure.mgmt.machinelearningservices.models.ScheduleStatus
"""
- _validation = {
- "auth_type": {"required": True},
- }
-
_attribute_map = {
- "auth_type": {"key": "authType", "type": "str"},
- "category": {"key": "category", "type": "str"},
- "target": {"key": "target", "type": "str"},
- "value": {"key": "value", "type": "str"},
- "value_format": {"key": "valueFormat", "type": "str"},
- "credentials": {"key": "credentials", "type": "WorkspaceConnectionSharedAccessSignature"},
+ "id": {"key": "id", "type": "str"},
+ "provisioning_status": {"key": "provisioningStatus", "type": "str"},
+ "status": {"key": "status", "type": "str"},
}
def __init__(
self,
*,
- category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
- target: Optional[str] = None,
- value: Optional[str] = None,
- value_format: Optional[Union[str, "_models.ValueFormat"]] = None,
- credentials: Optional["_models.WorkspaceConnectionSharedAccessSignature"] = None,
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
+ provisioning_status: Optional[Union[str, "_models.ScheduleProvisioningState"]] = None,
+ status: Optional[Union[str, "_models.ScheduleStatus"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword category: Category of the connection. Known values are: "PythonFeed",
- "ContainerRegistry", and "Git".
- :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
- :keyword target:
- :paramtype target: str
- :keyword value: Value details of the workspace connection.
- :paramtype value: str
- :keyword value_format: format for the workspace connection value. "JSON"
- :paramtype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
- :keyword credentials:
- :paramtype credentials:
- ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionSharedAccessSignature
+ :keyword id: A system assigned id for the schedule.
+ :paramtype id: str
+ :keyword provisioning_status: The current deployment state of schedule. Known values are:
+ "Completed", "Provisioning", and "Failed".
+ :paramtype provisioning_status: str or
+ ~azure.mgmt.machinelearningservices.models.ScheduleProvisioningState
+ :keyword status: Is the schedule enabled or disabled? Known values are: "Enabled" and
+ "Disabled".
+ :paramtype status: str or ~azure.mgmt.machinelearningservices.models.ScheduleStatus
"""
- super().__init__(category=category, target=target, value=value, value_format=value_format, **kwargs)
- self.auth_type: str = "SAS"
- self.credentials = credentials
+ super().__init__(**kwargs)
+ self.id = id
+ self.provisioning_status = provisioning_status
+ self.status = status
-class SASCredentialDto(PendingUploadCredentialDto):
- """SASCredentialDto.
+class ScheduleProperties(ResourceBase):
+ """Base definition of a schedule.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
- :ivar credential_type: [Required] Credential type used to authentication with storage.
- Required. "SAS"
- :vartype credential_type: str or
- ~azure.mgmt.machinelearningservices.models.PendingUploadCredentialType
- :ivar sas_uri: Full SAS Uri, including the storage, container/blob path and SAS token.
- :vartype sas_uri: str
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar action: [Required] Specifies the action of the schedule. Required.
+ :vartype action: ~azure.mgmt.machinelearningservices.models.ScheduleActionBase
+ :ivar display_name: Display name of schedule.
+ :vartype display_name: str
+ :ivar is_enabled: Is the schedule enabled?
+ :vartype is_enabled: bool
+ :ivar provisioning_state: Provisioning state for the schedule. Known values are: "Creating",
+ "Updating", "Deleting", "Succeeded", "Failed", and "Canceled".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.ScheduleProvisioningStatus
+ :ivar trigger: [Required] Specifies the trigger details. Required.
+ :vartype trigger: ~azure.mgmt.machinelearningservices.models.TriggerBase
"""
_validation = {
- "credential_type": {"required": True},
+ "action": {"required": True},
+ "provisioning_state": {"readonly": True},
+ "trigger": {"required": True},
}
_attribute_map = {
- "credential_type": {"key": "credentialType", "type": "str"},
- "sas_uri": {"key": "sasUri", "type": "str"},
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "action": {"key": "action", "type": "ScheduleActionBase"},
+ "display_name": {"key": "displayName", "type": "str"},
+ "is_enabled": {"key": "isEnabled", "type": "bool"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "trigger": {"key": "trigger", "type": "TriggerBase"},
}
- def __init__(self, *, sas_uri: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ action: "_models.ScheduleActionBase",
+ trigger: "_models.TriggerBase",
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ display_name: Optional[str] = None,
+ is_enabled: bool = True,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword sas_uri: Full SAS Uri, including the storage, container/blob path and SAS token.
- :paramtype sas_uri: str
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword action: [Required] Specifies the action of the schedule. Required.
+ :paramtype action: ~azure.mgmt.machinelearningservices.models.ScheduleActionBase
+ :keyword display_name: Display name of schedule.
+ :paramtype display_name: str
+ :keyword is_enabled: Is the schedule enabled?
+ :paramtype is_enabled: bool
+ :keyword trigger: [Required] Specifies the trigger details. Required.
+ :paramtype trigger: ~azure.mgmt.machinelearningservices.models.TriggerBase
"""
- super().__init__(**kwargs)
- self.credential_type: str = "SAS"
- self.sas_uri = sas_uri
-
-
-class SasDatastoreCredentials(DatastoreCredentials):
- """SAS datastore credentials configuration.
-
- All required parameters must be populated in order to send to Azure.
+ super().__init__(description=description, properties=properties, tags=tags, **kwargs)
+ self.action = action
+ self.display_name = display_name
+ self.is_enabled = is_enabled
+ self.provisioning_state = None
+ self.trigger = trigger
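# Editor's sketch of the Schedule envelope added above. CronTrigger, JobScheduleAction and
# PipelineJob are assumed concrete subtypes of TriggerBase/ScheduleActionBase/JobBaseProperties
# in this models module; the display name and cron expression are placeholders.
from azure.mgmt.machinelearningservices import models

schedule = models.Schedule(
    properties=models.ScheduleProperties(
        display_name="nightly-retrain",
        action=models.JobScheduleAction(job_definition=models.PipelineJob()),
        trigger=models.CronTrigger(expression="0 2 * * *"),
    )
)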
- :ivar credentials_type: [Required] Credential type used to authentication with storage.
- Required. Known values are: "AccountKey", "Certificate", "None", "Sas", and "ServicePrincipal".
- :vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
- :ivar secrets: [Required] Storage container secrets. Required.
- :vartype secrets: ~azure.mgmt.machinelearningservices.models.SasDatastoreSecrets
- """
- _validation = {
- "credentials_type": {"required": True},
- "secrets": {"required": True},
- }
+class ScheduleResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of Schedule entities.
+
+ :ivar next_link: The link to the next page of Schedule objects. If null, there are no
+ additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type Schedule.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.Schedule]
+ """
_attribute_map = {
- "credentials_type": {"key": "credentialsType", "type": "str"},
- "secrets": {"key": "secrets", "type": "SasDatastoreSecrets"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[Schedule]"},
}
- def __init__(self, *, secrets: "_models.SasDatastoreSecrets", **kwargs: Any) -> None:
+ def __init__(
+ self, *, next_link: Optional[str] = None, value: Optional[List["_models.Schedule"]] = None, **kwargs: Any
+ ) -> None:
"""
- :keyword secrets: [Required] Storage container secrets. Required.
- :paramtype secrets: ~azure.mgmt.machinelearningservices.models.SasDatastoreSecrets
+ :keyword next_link: The link to the next page of Schedule objects. If null, there are no
+ additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type Schedule.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.Schedule]
"""
super().__init__(**kwargs)
- self.credentials_type: str = "Sas"
- self.secrets = secrets
-
+ self.next_link = next_link
+ self.value = value
-class SasDatastoreSecrets(DatastoreSecrets):
- """Datastore SAS secrets.
- All required parameters must be populated in order to send to Azure.
+class ScriptReference(_serialization.Model):
+ """Script reference.
- :ivar secrets_type: [Required] Credential type used to authentication with storage. Required.
- Known values are: "AccountKey", "Certificate", "Sas", and "ServicePrincipal".
- :vartype secrets_type: str or ~azure.mgmt.machinelearningservices.models.SecretsType
- :ivar sas_token: Storage container SAS token.
- :vartype sas_token: str
+ :ivar script_source: The storage source of the script: inline, workspace.
+ :vartype script_source: str
+ :ivar script_data: The location of scripts in the mounted volume.
+ :vartype script_data: str
+ :ivar script_arguments: Optional command line arguments passed to the script to run.
+ :vartype script_arguments: str
+ :ivar timeout: Optional time period passed to timeout command.
+ :vartype timeout: str
"""
- _validation = {
- "secrets_type": {"required": True},
- }
-
_attribute_map = {
- "secrets_type": {"key": "secretsType", "type": "str"},
- "sas_token": {"key": "sasToken", "type": "str"},
+ "script_source": {"key": "scriptSource", "type": "str"},
+ "script_data": {"key": "scriptData", "type": "str"},
+ "script_arguments": {"key": "scriptArguments", "type": "str"},
+ "timeout": {"key": "timeout", "type": "str"},
}
- def __init__(self, *, sas_token: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ script_source: Optional[str] = None,
+ script_data: Optional[str] = None,
+ script_arguments: Optional[str] = None,
+ timeout: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword sas_token: Storage container SAS token.
- :paramtype sas_token: str
+ :keyword script_source: The storage source of the script: inline, workspace.
+ :paramtype script_source: str
+ :keyword script_data: The location of scripts in the mounted volume.
+ :paramtype script_data: str
+ :keyword script_arguments: Optional command line arguments passed to the script to run.
+ :paramtype script_arguments: str
+ :keyword timeout: Optional time period passed to timeout command.
+ :paramtype timeout: str
"""
super().__init__(**kwargs)
- self.secrets_type: str = "Sas"
- self.sas_token = sas_token
-
+ self.script_source = script_source
+ self.script_data = script_data
+ self.script_arguments = script_arguments
+ self.timeout = timeout
-class ScaleSettings(_serialization.Model):
- """scale settings for AML Compute.
- All required parameters must be populated in order to send to Azure.
+class ScriptsToExecute(_serialization.Model):
+ """Customized setup scripts.
- :ivar max_node_count: Max number of nodes to use. Required.
- :vartype max_node_count: int
- :ivar min_node_count: Min number of nodes to use.
- :vartype min_node_count: int
- :ivar node_idle_time_before_scale_down: Node Idle Time before scaling down amlCompute. This
- string needs to be in the RFC Format.
- :vartype node_idle_time_before_scale_down: ~datetime.timedelta
+ :ivar startup_script: Script that's run every time the machine starts.
+ :vartype startup_script: ~azure.mgmt.machinelearningservices.models.ScriptReference
+ :ivar creation_script: Script that's run only once during provision of the compute.
+ :vartype creation_script: ~azure.mgmt.machinelearningservices.models.ScriptReference
"""
- _validation = {
- "max_node_count": {"required": True},
- }
-
_attribute_map = {
- "max_node_count": {"key": "maxNodeCount", "type": "int"},
- "min_node_count": {"key": "minNodeCount", "type": "int"},
- "node_idle_time_before_scale_down": {"key": "nodeIdleTimeBeforeScaleDown", "type": "duration"},
+ "startup_script": {"key": "startupScript", "type": "ScriptReference"},
+ "creation_script": {"key": "creationScript", "type": "ScriptReference"},
}
def __init__(
self,
*,
- max_node_count: int,
- min_node_count: int = 0,
- node_idle_time_before_scale_down: Optional[datetime.timedelta] = None,
+ startup_script: Optional["_models.ScriptReference"] = None,
+ creation_script: Optional["_models.ScriptReference"] = None,
**kwargs: Any
) -> None:
"""
- :keyword max_node_count: Max number of nodes to use. Required.
- :paramtype max_node_count: int
- :keyword min_node_count: Min number of nodes to use.
- :paramtype min_node_count: int
- :keyword node_idle_time_before_scale_down: Node Idle Time before scaling down amlCompute. This
- string needs to be in the RFC Format.
- :paramtype node_idle_time_before_scale_down: ~datetime.timedelta
+ :keyword startup_script: Script that's run every time the machine starts.
+ :paramtype startup_script: ~azure.mgmt.machinelearningservices.models.ScriptReference
+ :keyword creation_script: Script that's run only once during provision of the compute.
+ :paramtype creation_script: ~azure.mgmt.machinelearningservices.models.ScriptReference
"""
super().__init__(**kwargs)
- self.max_node_count = max_node_count
- self.min_node_count = min_node_count
- self.node_idle_time_before_scale_down = node_idle_time_before_scale_down
+ self.startup_script = startup_script
+ self.creation_script = creation_script
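# Editor's sketch (not part of the generated diff): pairing ScriptReference with ScriptsToExecute
# for a compute-instance setup-scripts payload; paths, arguments and timeout are placeholders.
from azure.mgmt.machinelearningservices import models

setup_scripts = models.ScriptsToExecute(
    creation_script=models.ScriptReference(
        script_source="workspace",
        script_data="Users/admin/setup.sh",
        script_arguments="--quiet",
        timeout="5m",
    ),
)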
-class ScaleSettingsInformation(_serialization.Model):
- """Desired scale settings for the amlCompute.
+class SecretConfiguration(_serialization.Model):
+ """Secret Configuration definition.
- :ivar scale_settings: scale settings for AML Compute.
- :vartype scale_settings: ~azure.mgmt.machinelearningservices.models.ScaleSettings
+ :ivar uri: Secret Uri.
+ Sample Uri: https://myvault.vault.azure.net/secrets/mysecretname/secretversion.
+ :vartype uri: str
+ :ivar workspace_secret_name: Name of secret in workspace key vault.
+ :vartype workspace_secret_name: str
"""
_attribute_map = {
- "scale_settings": {"key": "scaleSettings", "type": "ScaleSettings"},
+ "uri": {"key": "uri", "type": "str"},
+ "workspace_secret_name": {"key": "workspaceSecretName", "type": "str"},
}
- def __init__(self, *, scale_settings: Optional["_models.ScaleSettings"] = None, **kwargs: Any) -> None:
+ def __init__(
+ self, *, uri: Optional[str] = None, workspace_secret_name: Optional[str] = None, **kwargs: Any
+ ) -> None:
"""
- :keyword scale_settings: scale settings for AML Compute.
- :paramtype scale_settings: ~azure.mgmt.machinelearningservices.models.ScaleSettings
+ :keyword uri: Secret Uri.
+ Sample Uri: https://myvault.vault.azure.net/secrets/mysecretname/secretversion.
+ :paramtype uri: str
+ :keyword workspace_secret_name: Name of secret in workspace key vault.
+ :paramtype workspace_secret_name: str
"""
super().__init__(**kwargs)
- self.scale_settings = scale_settings
+ self.uri = uri
+ self.workspace_secret_name = workspace_secret_name
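# Editor's sketch, illustrative only: SecretConfiguration can point at a Key Vault secret URI
# (as below, reusing the sample from the docstring) or at a workspace Key Vault secret name.
from azure.mgmt.machinelearningservices import models

secret = models.SecretConfiguration(
    uri="https://myvault.vault.azure.net/secrets/mysecretname/secretversion",
)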
-class Schedule(Resource):
- """Azure Resource Manager resource envelope.
+class ServerlessEndpoint(TrackedResource):
+ """ServerlessEndpoint.
Variables are only populated by the server, and will be ignored when sending a request.
@@ -18715,8 +27952,19 @@ class Schedule(Resource):
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar tags: Resource tags.
+ :vartype tags: dict[str, str]
+ :ivar location: The geo-location where the resource lives. Required.
+ :vartype location: str
+ :ivar identity: Managed service identity (system assigned and/or user assigned identities).
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :ivar kind: Metadata used by portal/tooling/etc to render different UX experiences for
+ resources of the same type.
+ :vartype kind: str
:ivar properties: [Required] Additional attributes of the entity. Required.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.ScheduleProperties
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.ServerlessEndpointProperties
+ :ivar sku: Sku details required for ARM contract for Autoscaling.
+ :vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
_validation = {
@@ -18724,6 +27972,7 @@ class Schedule(Resource):
"name": {"readonly": True},
"type": {"readonly": True},
"system_data": {"readonly": True},
+ "location": {"required": True},
"properties": {"required": True},
}
@@ -18732,254 +27981,287 @@ class Schedule(Resource):
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
- "properties": {"key": "properties", "type": "ScheduleProperties"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "location": {"key": "location", "type": "str"},
+ "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
+ "kind": {"key": "kind", "type": "str"},
+ "properties": {"key": "properties", "type": "ServerlessEndpointProperties"},
+ "sku": {"key": "sku", "type": "Sku"},
}
- def __init__(self, *, properties: "_models.ScheduleProperties", **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ location: str,
+ properties: "_models.ServerlessEndpointProperties",
+ tags: Optional[Dict[str, str]] = None,
+ identity: Optional["_models.ManagedServiceIdentity"] = None,
+ kind: Optional[str] = None,
+ sku: Optional["_models.Sku"] = None,
+ **kwargs: Any
+ ) -> None:
"""
+ :keyword tags: Resource tags.
+ :paramtype tags: dict[str, str]
+ :keyword location: The geo-location where the resource lives. Required.
+ :paramtype location: str
+ :keyword identity: Managed service identity (system assigned and/or user assigned identities).
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :keyword kind: Metadata used by portal/tooling/etc to render different UX experiences for
+ resources of the same type.
+ :paramtype kind: str
:keyword properties: [Required] Additional attributes of the entity. Required.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.ScheduleProperties
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.ServerlessEndpointProperties
+ :keyword sku: Sku details required for ARM contract for Autoscaling.
+ :paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
- super().__init__(**kwargs)
+ super().__init__(tags=tags, location=location, **kwargs)
+ self.identity = identity
+ self.kind = kind
self.properties = properties
+ self.sku = sku
-class ScheduleBase(_serialization.Model):
- """ScheduleBase.
+class ServerlessEndpointCapacityReservation(_serialization.Model):
+ """ServerlessEndpointCapacityReservation.
- :ivar id: A system assigned id for the schedule.
- :vartype id: str
- :ivar provisioning_status: The current deployment state of schedule. Known values are:
- "Completed", "Provisioning", and "Failed".
- :vartype provisioning_status: str or
- ~azure.mgmt.machinelearningservices.models.ScheduleProvisioningState
- :ivar status: Is the schedule enabled or disabled?. Known values are: "Enabled" and "Disabled".
- :vartype status: str or ~azure.mgmt.machinelearningservices.models.ScheduleStatus
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar capacity_reservation_group_id: [Required] Specifies a capacity reservation group ID to
+ allocate capacity from. Required.
+ :vartype capacity_reservation_group_id: str
+ :ivar endpoint_reserved_capacity: Specifies a capacity amount to reserve for this endpoint
+ within the parent capacity reservation group.
+ :vartype endpoint_reserved_capacity: int
"""
+ _validation = {
+ "capacity_reservation_group_id": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
_attribute_map = {
- "id": {"key": "id", "type": "str"},
- "provisioning_status": {"key": "provisioningStatus", "type": "str"},
- "status": {"key": "status", "type": "str"},
+ "capacity_reservation_group_id": {"key": "capacityReservationGroupId", "type": "str"},
+ "endpoint_reserved_capacity": {"key": "endpointReservedCapacity", "type": "int"},
}
def __init__(
- self,
- *,
- id: Optional[str] = None, # pylint: disable=redefined-builtin
- provisioning_status: Optional[Union[str, "_models.ScheduleProvisioningState"]] = None,
- status: Optional[Union[str, "_models.ScheduleStatus"]] = None,
- **kwargs: Any
+ self, *, capacity_reservation_group_id: str, endpoint_reserved_capacity: Optional[int] = None, **kwargs: Any
) -> None:
- """
- :keyword id: A system assigned id for the schedule.
- :paramtype id: str
- :keyword provisioning_status: The current deployment state of schedule. Known values are:
- "Completed", "Provisioning", and "Failed".
- :paramtype provisioning_status: str or
- ~azure.mgmt.machinelearningservices.models.ScheduleProvisioningState
- :keyword status: Is the schedule enabled or disabled?. Known values are: "Enabled" and
- "Disabled".
- :paramtype status: str or ~azure.mgmt.machinelearningservices.models.ScheduleStatus
- """
- super().__init__(**kwargs)
- self.id = id
- self.provisioning_status = provisioning_status
- self.status = status
-
-
-class ScheduleProperties(ResourceBase):
- """Base definition of a schedule.
-
- Variables are only populated by the server, and will be ignored when sending a request.
-
- All required parameters must be populated in order to send to Azure.
-
- :ivar description: The asset description text.
- :vartype description: str
- :ivar properties: The asset property dictionary.
- :vartype properties: dict[str, str]
- :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
- :vartype tags: dict[str, str]
- :ivar action: [Required] Specifies the action of the schedule. Required.
- :vartype action: ~azure.mgmt.machinelearningservices.models.ScheduleActionBase
- :ivar display_name: Display name of schedule.
- :vartype display_name: str
- :ivar is_enabled: Is the schedule enabled?.
- :vartype is_enabled: bool
- :ivar provisioning_state: Provisioning state for the schedule. Known values are: "Creating",
- "Updating", "Deleting", "Succeeded", "Failed", and "Canceled".
+ """
+ :keyword capacity_reservation_group_id: [Required] Specifies a capacity reservation group ID to
+ allocate capacity from. Required.
+ :paramtype capacity_reservation_group_id: str
+ :keyword endpoint_reserved_capacity: Specifies a capacity amount to reserve for this endpoint
+ within the parent capacity reservation group.
+ :paramtype endpoint_reserved_capacity: int
+ """
+ super().__init__(**kwargs)
+ self.capacity_reservation_group_id = capacity_reservation_group_id
+ self.endpoint_reserved_capacity = endpoint_reserved_capacity
+
+
+class ServerlessEndpointProperties(_serialization.Model):
+ """ServerlessEndpointProperties.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar auth_mode: Specifies the authentication mode for the Serverless endpoint. Known values
+ are: "Key" and "AAD".
+ :vartype auth_mode: str or
+ ~azure.mgmt.machinelearningservices.models.ServerlessInferenceEndpointAuthMode
+ :ivar capacity_reservation: Optional capacity reservation information for the endpoint. When
+ specified, the Serverless Endpoint
+ will be allocated capacity from the specified capacity reservation group.
+ :vartype capacity_reservation:
+ ~azure.mgmt.machinelearningservices.models.ServerlessEndpointCapacityReservation
+ :ivar inference_endpoint: The inference uri to target when making requests against the
+ serverless endpoint.
+ :vartype inference_endpoint:
+ ~azure.mgmt.machinelearningservices.models.ServerlessInferenceEndpoint
+ :ivar offer: [Required] The publisher-defined Serverless Offer to provision the endpoint with.
+ Required.
+ :vartype offer: ~azure.mgmt.machinelearningservices.models.ServerlessOffer
+ :ivar provisioning_state: Provisioning state for the endpoint. Known values are: "Creating",
+ "Deleting", "Succeeded", "Failed", "Updating", and "Canceled".
:vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.ScheduleProvisioningStatus
- :ivar trigger: [Required] Specifies the trigger details. Required.
- :vartype trigger: ~azure.mgmt.machinelearningservices.models.TriggerBase
+ ~azure.mgmt.machinelearningservices.models.EndpointProvisioningState
"""
_validation = {
- "action": {"required": True},
+ "inference_endpoint": {"readonly": True},
+ "offer": {"required": True},
"provisioning_state": {"readonly": True},
- "trigger": {"required": True},
}
_attribute_map = {
- "description": {"key": "description", "type": "str"},
- "properties": {"key": "properties", "type": "{str}"},
- "tags": {"key": "tags", "type": "{str}"},
- "action": {"key": "action", "type": "ScheduleActionBase"},
- "display_name": {"key": "displayName", "type": "str"},
- "is_enabled": {"key": "isEnabled", "type": "bool"},
+ "auth_mode": {"key": "authMode", "type": "str"},
+ "capacity_reservation": {"key": "capacityReservation", "type": "ServerlessEndpointCapacityReservation"},
+ "inference_endpoint": {"key": "inferenceEndpoint", "type": "ServerlessInferenceEndpoint"},
+ "offer": {"key": "offer", "type": "ServerlessOffer"},
"provisioning_state": {"key": "provisioningState", "type": "str"},
- "trigger": {"key": "trigger", "type": "TriggerBase"},
}
def __init__(
self,
*,
- action: "_models.ScheduleActionBase",
- trigger: "_models.TriggerBase",
- description: Optional[str] = None,
- properties: Optional[Dict[str, str]] = None,
- tags: Optional[Dict[str, str]] = None,
- display_name: Optional[str] = None,
- is_enabled: bool = True,
+ offer: "_models.ServerlessOffer",
+ auth_mode: Optional[Union[str, "_models.ServerlessInferenceEndpointAuthMode"]] = None,
+ capacity_reservation: Optional["_models.ServerlessEndpointCapacityReservation"] = None,
**kwargs: Any
) -> None:
"""
- :keyword description: The asset description text.
- :paramtype description: str
- :keyword properties: The asset property dictionary.
- :paramtype properties: dict[str, str]
- :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
- :paramtype tags: dict[str, str]
- :keyword action: [Required] Specifies the action of the schedule. Required.
- :paramtype action: ~azure.mgmt.machinelearningservices.models.ScheduleActionBase
- :keyword display_name: Display name of schedule.
- :paramtype display_name: str
- :keyword is_enabled: Is the schedule enabled?.
- :paramtype is_enabled: bool
- :keyword trigger: [Required] Specifies the trigger details. Required.
- :paramtype trigger: ~azure.mgmt.machinelearningservices.models.TriggerBase
+ :keyword auth_mode: Specifies the authentication mode for the Serverless endpoint. Known values
+ are: "Key" and "AAD".
+ :paramtype auth_mode: str or
+ ~azure.mgmt.machinelearningservices.models.ServerlessInferenceEndpointAuthMode
+ :keyword capacity_reservation: Optional capacity reservation information for the endpoint. When
+ specified, the Serverless Endpoint
+ will be allocated capacity from the specified capacity reservation group.
+ :paramtype capacity_reservation:
+ ~azure.mgmt.machinelearningservices.models.ServerlessEndpointCapacityReservation
+ :keyword offer: [Required] The publisher-defined Serverless Offer to provision the endpoint
+ with. Required.
+ :paramtype offer: ~azure.mgmt.machinelearningservices.models.ServerlessOffer
"""
- super().__init__(description=description, properties=properties, tags=tags, **kwargs)
- self.action = action
- self.display_name = display_name
- self.is_enabled = is_enabled
+ super().__init__(**kwargs)
+ self.auth_mode = auth_mode
+ self.capacity_reservation = capacity_reservation
+ self.inference_endpoint = None
+ self.offer = offer
self.provisioning_state = None
- self.trigger = trigger
-class ScheduleResourceArmPaginatedResult(_serialization.Model):
- """A paginated list of Schedule entities.
+class ServerlessEndpointStatus(_serialization.Model):
+ """ServerlessEndpointStatus.
- :ivar next_link: The link to the next page of Schedule objects. If null, there are no
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar metrics: Dictionary of string metric values.
+ :vartype metrics: dict[str, str]
+ """
+
+ _validation = {
+ "metrics": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "metrics": {"key": "metrics", "type": "{str}"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.metrics = None
+
+
+class ServerlessEndpointTrackedResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of ServerlessEndpoint entities.
+
+ :ivar next_link: The link to the next page of ServerlessEndpoint objects. If null, there are no
additional pages.
:vartype next_link: str
- :ivar value: An array of objects of type Schedule.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.Schedule]
+ :ivar value: An array of objects of type ServerlessEndpoint.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
"""
_attribute_map = {
"next_link": {"key": "nextLink", "type": "str"},
- "value": {"key": "value", "type": "[Schedule]"},
+ "value": {"key": "value", "type": "[ServerlessEndpoint]"},
}
def __init__(
- self, *, next_link: Optional[str] = None, value: Optional[List["_models.Schedule"]] = None, **kwargs: Any
+ self,
+ *,
+ next_link: Optional[str] = None,
+ value: Optional[List["_models.ServerlessEndpoint"]] = None,
+ **kwargs: Any
) -> None:
"""
- :keyword next_link: The link to the next page of Schedule objects. If null, there are no
- additional pages.
+ :keyword next_link: The link to the next page of ServerlessEndpoint objects. If null, there are
+ no additional pages.
:paramtype next_link: str
- :keyword value: An array of objects of type Schedule.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.Schedule]
+ :keyword value: An array of objects of type ServerlessEndpoint.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
"""
super().__init__(**kwargs)
self.next_link = next_link
self.value = value
-class ScriptReference(_serialization.Model):
- """Script reference.
+class ServerlessInferenceEndpoint(_serialization.Model):
+ """ServerlessInferenceEndpoint.
- :ivar script_source: The storage source of the script: workspace.
- :vartype script_source: str
- :ivar script_data: The location of scripts in the mounted volume.
- :vartype script_data: str
- :ivar script_arguments: Optional command line arguments passed to the script to run.
- :vartype script_arguments: str
- :ivar timeout: Optional time period passed to timeout command.
- :vartype timeout: str
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar headers: Specifies any required headers to target this serverless endpoint.
+ :vartype headers: dict[str, str]
+ :ivar uri: [Required] The inference uri to target when making requests against the Serverless
+ Endpoint. Required.
+ :vartype uri: str
"""
+ _validation = {
+ "headers": {"readonly": True},
+ "uri": {"required": True},
+ }
+
_attribute_map = {
- "script_source": {"key": "scriptSource", "type": "str"},
- "script_data": {"key": "scriptData", "type": "str"},
- "script_arguments": {"key": "scriptArguments", "type": "str"},
- "timeout": {"key": "timeout", "type": "str"},
+ "headers": {"key": "headers", "type": "{str}"},
+ "uri": {"key": "uri", "type": "str"},
}
- def __init__(
- self,
- *,
- script_source: Optional[str] = None,
- script_data: Optional[str] = None,
- script_arguments: Optional[str] = None,
- timeout: Optional[str] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, uri: str, **kwargs: Any) -> None:
"""
- :keyword script_source: The storage source of the script: workspace.
- :paramtype script_source: str
- :keyword script_data: The location of scripts in the mounted volume.
- :paramtype script_data: str
- :keyword script_arguments: Optional command line arguments passed to the script to run.
- :paramtype script_arguments: str
- :keyword timeout: Optional time period passed to timeout command.
- :paramtype timeout: str
+ :keyword uri: [Required] The inference uri to target when making requests against the
+ Serverless Endpoint. Required.
+ :paramtype uri: str
"""
super().__init__(**kwargs)
- self.script_source = script_source
- self.script_data = script_data
- self.script_arguments = script_arguments
- self.timeout = timeout
+ self.headers = None
+ self.uri = uri
-class ScriptsToExecute(_serialization.Model):
- """Customized setup scripts.
+class ServerlessOffer(_serialization.Model):
+ """ServerlessOffer.
- :ivar startup_script: Script that's run every time the machine starts.
- :vartype startup_script: ~azure.mgmt.machinelearningservices.models.ScriptReference
- :ivar creation_script: Script that's run only once during provision of the compute.
- :vartype creation_script: ~azure.mgmt.machinelearningservices.models.ScriptReference
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar offer_name: [Required] The name of the Serverless Offer. Required.
+ :vartype offer_name: str
+ :ivar publisher: [Required] Publisher name of the Serverless Offer. Required.
+ :vartype publisher: str
"""
+ _validation = {
+ "offer_name": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "publisher": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
_attribute_map = {
- "startup_script": {"key": "startupScript", "type": "ScriptReference"},
- "creation_script": {"key": "creationScript", "type": "ScriptReference"},
+ "offer_name": {"key": "offerName", "type": "str"},
+ "publisher": {"key": "publisher", "type": "str"},
}
- def __init__(
- self,
- *,
- startup_script: Optional["_models.ScriptReference"] = None,
- creation_script: Optional["_models.ScriptReference"] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, offer_name: str, publisher: str, **kwargs: Any) -> None:
"""
- :keyword startup_script: Script that's run every time the machine starts.
- :paramtype startup_script: ~azure.mgmt.machinelearningservices.models.ScriptReference
- :keyword creation_script: Script that's run only once during provision of the compute.
- :paramtype creation_script: ~azure.mgmt.machinelearningservices.models.ScriptReference
+ :keyword offer_name: [Required] The name of the Serverless Offer. Required.
+ :paramtype offer_name: str
+ :keyword publisher: [Required] Publisher name of the Serverless Offer. Required.
+ :paramtype publisher: str
"""
super().__init__(**kwargs)
- self.startup_script = startup_script
- self.creation_script = creation_script
+ self.offer_name = offer_name
+ self.publisher = publisher
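# Editor's hedged end-to-end sketch tying together the serverless models added above
# (ServerlessOffer -> ServerlessEndpointProperties -> ServerlessEndpoint). The Sku model is
# assumed from elsewhere in this package; region, offer, publisher and IDs are placeholders.
from azure.mgmt.machinelearningservices import models

endpoint = models.ServerlessEndpoint(
    location="eastus",
    properties=models.ServerlessEndpointProperties(
        offer=models.ServerlessOffer(offer_name="llama-2-7b", publisher="contoso"),
        auth_mode="Key",
        capacity_reservation=models.ServerlessEndpointCapacityReservation(
            capacity_reservation_group_id="<capacity-reservation-group-arm-id>",
        ),
    ),
    sku=models.Sku(name="Consumption"),
)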
class ServiceManagedResourcesSettings(_serialization.Model):
"""ServiceManagedResourcesSettings.
- :ivar cosmos_db: The settings for the service managed cosmosdb account.
+ :ivar cosmos_db:
:vartype cosmos_db: ~azure.mgmt.machinelearningservices.models.CosmosDbSettings
"""
@@ -18989,20 +28271,111 @@ class ServiceManagedResourcesSettings(_serialization.Model):
def __init__(self, *, cosmos_db: Optional["_models.CosmosDbSettings"] = None, **kwargs: Any) -> None:
"""
- :keyword cosmos_db: The settings for the service managed cosmosdb account.
+ :keyword cosmos_db:
:paramtype cosmos_db: ~azure.mgmt.machinelearningservices.models.CosmosDbSettings
"""
super().__init__(**kwargs)
self.cosmos_db = cosmos_db
+class ServicePrincipalAuthTypeWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2):
+ """ServicePrincipalAuthTypeWorkspaceConnectionProperties.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar auth_type: Authentication type of the connection target. Required. Known values are:
+ "PAT", "ManagedIdentity", "UsernamePassword", "None", "SAS", "ServicePrincipal", "AccessKey",
+ "ApiKey", and "CustomKeys".
+ :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
+ :ivar category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI",
+ "CognitiveSearch", "CognitiveService", and "CustomKeys".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :ivar created_by_workspace_arm_id: The arm id of the workspace which created this connection.
+ :vartype created_by_workspace_arm_id: str
+ :ivar expiry_time:
+ :vartype expiry_time: ~datetime.datetime
+ :ivar is_shared_to_all: Whether this connection will be shared to all the project workspaces
+ under the hub.
+ :vartype is_shared_to_all: bool
+ :ivar metadata: Any object.
+ :vartype metadata: JSON
+ :ivar target:
+ :vartype target: str
+ :ivar credentials:
+ :vartype credentials:
+ ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionServicePrincipal
+ """
+
+ _validation = {
+ "auth_type": {"required": True},
+ "created_by_workspace_arm_id": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "auth_type": {"key": "authType", "type": "str"},
+ "category": {"key": "category", "type": "str"},
+ "created_by_workspace_arm_id": {"key": "createdByWorkspaceArmId", "type": "str"},
+ "expiry_time": {"key": "expiryTime", "type": "iso-8601"},
+ "is_shared_to_all": {"key": "isSharedToAll", "type": "bool"},
+ "metadata": {"key": "metadata", "type": "object"},
+ "target": {"key": "target", "type": "str"},
+ "credentials": {"key": "credentials", "type": "WorkspaceConnectionServicePrincipal"},
+ }
+
+ def __init__(
+ self,
+ *,
+ category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
+ expiry_time: Optional[datetime.datetime] = None,
+ is_shared_to_all: Optional[bool] = None,
+ metadata: Optional[JSON] = None,
+ target: Optional[str] = None,
+ credentials: Optional["_models.WorkspaceConnectionServicePrincipal"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI",
+ "CognitiveSearch", "CognitiveService", and "CustomKeys".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :keyword expiry_time:
+ :paramtype expiry_time: ~datetime.datetime
+ :keyword is_shared_to_all: Whether this connection will be shared to all the project workspaces
+ under the hub.
+ :paramtype is_shared_to_all: bool
+ :keyword metadata: Any object.
+ :paramtype metadata: JSON
+ :keyword target:
+ :paramtype target: str
+ :keyword credentials:
+ :paramtype credentials:
+ ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionServicePrincipal
+ """
+ super().__init__(
+ category=category,
+ expiry_time=expiry_time,
+ is_shared_to_all=is_shared_to_all,
+ metadata=metadata,
+ target=target,
+ **kwargs
+ )
+ self.auth_type: str = "ServicePrincipal"
+ self.credentials = credentials
+
+
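As a hedged sketch of how the new service-principal connection properties might be populated: the keyword arguments assumed on WorkspaceConnectionServicePrincipal (client_id, client_secret, tenant_id) are not shown in this hunk and are inferred from the credential's purpose.

    # Hedged sketch: building ServicePrincipalAuthTypeWorkspaceConnectionProperties.
    # The WorkspaceConnectionServicePrincipal keyword arguments (client_id,
    # client_secret, tenant_id) are assumptions; they do not appear in this hunk.
    from azure.mgmt.machinelearningservices.models import (
        ServicePrincipalAuthTypeWorkspaceConnectionProperties,
        WorkspaceConnectionServicePrincipal,
    )

    props = ServicePrincipalAuthTypeWorkspaceConnectionProperties(
        category="AzureSqlDb",
        target="https://example-sql.database.windows.net",
        credentials=WorkspaceConnectionServicePrincipal(
            client_id="<client-id>",
            client_secret="<client-secret>",
            tenant_id="<tenant-id>",
        ),
    )
    # The discriminator is fixed by the subclass constructor.
    assert props.auth_type == "ServicePrincipal"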
class ServicePrincipalDatastoreCredentials(DatastoreCredentials):
"""Service Principal datastore credentials configuration.
All required parameters must be populated in order to send to Azure.
:ivar credentials_type: [Required] Credential type used to authentication with storage.
- Required. Known values are: "AccountKey", "Certificate", "None", "Sas", and "ServicePrincipal".
+ Required. Known values are: "AccountKey", "Certificate", "None", "Sas", "ServicePrincipal",
+ "KerberosKeytab", and "KerberosPassword".
:vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
:ivar authority_url: Authority URL used for authentication.
:vartype authority_url: str
@@ -19064,35 +28437,147 @@ def __init__(
self.tenant_id = tenant_id
-class ServicePrincipalDatastoreSecrets(DatastoreSecrets):
- """Datastore Service Principal secrets.
+class ServicePrincipalDatastoreSecrets(DatastoreSecrets):
+ """Datastore Service Principal secrets.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar secrets_type: [Required] Credential type used to authenticate with storage. Required.
+ Known values are: "AccountKey", "Certificate", "Sas", "ServicePrincipal", "KerberosPassword",
+ and "KerberosKeytab".
+ :vartype secrets_type: str or ~azure.mgmt.machinelearningservices.models.SecretsType
+ :ivar client_secret: Service principal secret.
+ :vartype client_secret: str
+ """
+
+ _validation = {
+ "secrets_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "secrets_type": {"key": "secretsType", "type": "str"},
+ "client_secret": {"key": "clientSecret", "type": "str"},
+ }
+
+ def __init__(self, *, client_secret: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword client_secret: Service principal secret.
+ :paramtype client_secret: str
+ """
+ super().__init__(**kwargs)
+ self.secrets_type: str = "ServicePrincipal"
+ self.client_secret = client_secret
+
+
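A minimal sketch of the secrets model above; only the optional client secret is passed, and the discriminator is fixed to "ServicePrincipal" by the constructor. The secret value is a placeholder.

    # Sketch: ServicePrincipalDatastoreSecrets takes only the optional client secret;
    # secrets_type is set to "ServicePrincipal" automatically.
    from azure.mgmt.machinelearningservices.models import ServicePrincipalDatastoreSecrets

    secrets = ServicePrincipalDatastoreSecrets(client_secret="<service-principal-secret>")
    assert secrets.secrets_type == "ServicePrincipal"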
+class ServiceTagDestination(_serialization.Model):
+ """Service Tag destination for a Service Tag Outbound Rule for the managed network of a machine
+ learning workspace.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar action: The action enum for networking rule. Known values are: "Allow" and "Deny".
+ :vartype action: str or ~azure.mgmt.machinelearningservices.models.RuleAction
+ :ivar address_prefixes: Optional, if provided, the ServiceTag property will be ignored.
+ :vartype address_prefixes: list[str]
+ :ivar port_ranges:
+ :vartype port_ranges: str
+ :ivar protocol:
+ :vartype protocol: str
+ :ivar service_tag:
+ :vartype service_tag: str
+ """
+
+ _validation = {
+ "address_prefixes": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "action": {"key": "action", "type": "str"},
+ "address_prefixes": {"key": "addressPrefixes", "type": "[str]"},
+ "port_ranges": {"key": "portRanges", "type": "str"},
+ "protocol": {"key": "protocol", "type": "str"},
+ "service_tag": {"key": "serviceTag", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ action: Optional[Union[str, "_models.RuleAction"]] = None,
+ port_ranges: Optional[str] = None,
+ protocol: Optional[str] = None,
+ service_tag: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword action: The action enum for networking rule. Known values are: "Allow" and "Deny".
+ :paramtype action: str or ~azure.mgmt.machinelearningservices.models.RuleAction
+ :keyword port_ranges:
+ :paramtype port_ranges: str
+ :keyword protocol:
+ :paramtype protocol: str
+ :keyword service_tag:
+ :paramtype service_tag: str
+ """
+ super().__init__(**kwargs)
+ self.action = action
+ self.address_prefixes = None
+ self.port_ranges = port_ranges
+ self.protocol = protocol
+ self.service_tag = service_tag
+
+
+class ServiceTagOutboundRule(OutboundRule):
+ """Service Tag Outbound Rule for the managed network of a machine learning workspace.
All required parameters must be populated in order to send to Azure.
- :ivar secrets_type: [Required] Credential type used to authentication with storage. Required.
- Known values are: "AccountKey", "Certificate", "Sas", and "ServicePrincipal".
- :vartype secrets_type: str or ~azure.mgmt.machinelearningservices.models.SecretsType
- :ivar client_secret: Service principal secret.
- :vartype client_secret: str
+ :ivar category: Category of a managed network outbound rule of a machine learning workspace.
+ Known values are: "Required", "Recommended", and "UserDefined".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.RuleCategory
+ :ivar status: Status of a managed network outbound rule of a machine learning workspace. Known
+ values are: "Inactive" and "Active".
+ :vartype status: str or ~azure.mgmt.machinelearningservices.models.RuleStatus
+ :ivar type: Type of a managed network outbound rule of a machine learning workspace. Required.
+ Known values are: "FQDN", "PrivateEndpoint", and "ServiceTag".
+ :vartype type: str or ~azure.mgmt.machinelearningservices.models.RuleType
+ :ivar destination: Service Tag destination for a Service Tag Outbound Rule for the managed
+ network of a machine learning workspace.
+ :vartype destination: ~azure.mgmt.machinelearningservices.models.ServiceTagDestination
"""
_validation = {
- "secrets_type": {"required": True},
+ "type": {"required": True},
}
_attribute_map = {
- "secrets_type": {"key": "secretsType", "type": "str"},
- "client_secret": {"key": "clientSecret", "type": "str"},
+ "category": {"key": "category", "type": "str"},
+ "status": {"key": "status", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "destination": {"key": "destination", "type": "ServiceTagDestination"},
}
- def __init__(self, *, client_secret: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ category: Optional[Union[str, "_models.RuleCategory"]] = None,
+ status: Optional[Union[str, "_models.RuleStatus"]] = None,
+ destination: Optional["_models.ServiceTagDestination"] = None,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword client_secret: Service principal secret.
- :paramtype client_secret: str
+ :keyword category: Category of a managed network outbound rule of a machine learning workspace.
+ Known values are: "Required", "Recommended", and "UserDefined".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.RuleCategory
+ :keyword status: Status of a managed network outbound rule of a machine learning workspace.
+ Known values are: "Inactive" and "Active".
+ :paramtype status: str or ~azure.mgmt.machinelearningservices.models.RuleStatus
+ :keyword destination: Service Tag destination for a Service Tag Outbound Rule for the managed
+ network of a machine learning workspace.
+ :paramtype destination: ~azure.mgmt.machinelearningservices.models.ServiceTagDestination
"""
- super().__init__(**kwargs)
- self.secrets_type: str = "ServicePrincipal"
- self.client_secret = client_secret
+ super().__init__(category=category, status=status, **kwargs)
+ self.type: str = "ServiceTag"
+ self.destination = destination
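To make the relationship between the two new classes concrete, here is a hedged sketch of a service-tag outbound rule; the tag, protocol, and port values are illustrative only.

    # Sketch: a ServiceTag outbound rule wrapping a ServiceTagDestination.
    # "Storage", "TCP", and "443" are illustrative values, not defaults.
    from azure.mgmt.machinelearningservices.models import (
        ServiceTagDestination,
        ServiceTagOutboundRule,
    )

    rule = ServiceTagOutboundRule(
        category="UserDefined",
        destination=ServiceTagDestination(
            action="Allow",
            service_tag="Storage",
            protocol="TCP",
            port_ranges="443",
        ),
    )
    assert rule.type == "ServiceTag"  # fixed by the subclass constructor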
class SetupScripts(_serialization.Model):
@@ -19120,23 +28605,22 @@ class SharedPrivateLinkResource(_serialization.Model):
:ivar name: Unique name of the private link.
:vartype name: str
- :ivar private_link_resource_id: The resource id that private link links to.
- :vartype private_link_resource_id: str
- :ivar group_id: The private link resource group id.
+ :ivar group_id: group id of the private link.
:vartype group_id: str
+ :ivar private_link_resource_id: the resource id that private link links to.
+ :vartype private_link_resource_id: str
:ivar request_message: Request message.
:vartype request_message: str
- :ivar status: Indicates whether the connection has been Approved/Rejected/Removed by the owner
- of the service. Known values are: "Pending", "Approved", "Rejected", "Disconnected", and
- "Timeout".
+ :ivar status: Connection status of the service consumer with the service provider. Known values
+ are: "Approved", "Pending", "Rejected", "Disconnected", and "Timeout".
:vartype status: str or
- ~azure.mgmt.machinelearningservices.models.PrivateEndpointServiceConnectionStatus
+ ~azure.mgmt.machinelearningservices.models.EndpointServiceConnectionStatus
"""
_attribute_map = {
"name": {"key": "name", "type": "str"},
- "private_link_resource_id": {"key": "properties.privateLinkResourceId", "type": "str"},
"group_id": {"key": "properties.groupId", "type": "str"},
+ "private_link_resource_id": {"key": "properties.privateLinkResourceId", "type": "str"},
"request_message": {"key": "properties.requestMessage", "type": "str"},
"status": {"key": "properties.status", "type": "str"},
}
@@ -19145,31 +28629,30 @@ def __init__(
self,
*,
name: Optional[str] = None,
- private_link_resource_id: Optional[str] = None,
group_id: Optional[str] = None,
+ private_link_resource_id: Optional[str] = None,
request_message: Optional[str] = None,
- status: Optional[Union[str, "_models.PrivateEndpointServiceConnectionStatus"]] = None,
+ status: Optional[Union[str, "_models.EndpointServiceConnectionStatus"]] = None,
**kwargs: Any
) -> None:
"""
:keyword name: Unique name of the private link.
:paramtype name: str
- :keyword private_link_resource_id: The resource id that private link links to.
- :paramtype private_link_resource_id: str
- :keyword group_id: The private link resource group id.
+ :keyword group_id: group id of the private link.
:paramtype group_id: str
+ :keyword private_link_resource_id: the resource id that private link links to.
+ :paramtype private_link_resource_id: str
:keyword request_message: Request message.
:paramtype request_message: str
- :keyword status: Indicates whether the connection has been Approved/Rejected/Removed by the
- owner of the service. Known values are: "Pending", "Approved", "Rejected", "Disconnected", and
- "Timeout".
+ :keyword status: Connection status of the service consumer with the service provider. Known
+ values are: "Approved", "Pending", "Rejected", "Disconnected", and "Timeout".
:paramtype status: str or
- ~azure.mgmt.machinelearningservices.models.PrivateEndpointServiceConnectionStatus
+ ~azure.mgmt.machinelearningservices.models.EndpointServiceConnectionStatus
"""
super().__init__(**kwargs)
self.name = name
- self.private_link_resource_id = private_link_resource_id
self.group_id = group_id
+ self.private_link_resource_id = private_link_resource_id
self.request_message = request_message
self.status = status
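Since only the keyword order and the status enum type changed here, and the constructor is keyword-only, existing callers that pass arguments by name keep working. A hedged sketch with placeholder values:

    # Sketch: keyword names are unchanged, only their order moved, so callers
    # that pass them by name are unaffected. All values below are placeholders.
    from azure.mgmt.machinelearningservices.models import SharedPrivateLinkResource

    link = SharedPrivateLinkResource(
        name="example-link",
        group_id="blob",
        private_link_resource_id=(
            "/subscriptions/<sub>/resourceGroups/<rg>/providers/"
            "Microsoft.Storage/storageAccounts/<account>"
        ),
        request_message="Please approve",
        status="Pending",
    )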
@@ -19378,27 +28861,387 @@ class SkuSetting(_serialization.Model):
"""
_validation = {
- "name": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "name": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
+ _attribute_map = {
+ "name": {"key": "name", "type": "str"},
+ "tier": {"key": "tier", "type": "str"},
+ }
+
+ def __init__(self, *, name: str, tier: Optional[Union[str, "_models.SkuTier"]] = None, **kwargs: Any) -> None:
+ """
+ :keyword name: [Required] The name of the SKU. Ex - P3. It is typically a letter+number code.
+ Required.
+ :paramtype name: str
+ :keyword tier: This field is required to be implemented by the Resource Provider if the service
+ has more than one tier, but is not required on a PUT. Known values are: "Free", "Basic",
+ "Standard", and "Premium".
+ :paramtype tier: str or ~azure.mgmt.machinelearningservices.models.SkuTier
+ """
+ super().__init__(**kwargs)
+ self.name = name
+ self.tier = tier
+
+
+class SparkJob(JobBaseProperties): # pylint: disable=too-many-instance-attributes
+ """Spark job definition.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar component_id: ARM resource ID of the component resource.
+ :vartype component_id: str
+ :ivar compute_id: ARM resource ID of the compute resource.
+ :vartype compute_id: str
+ :ivar display_name: Display name of job.
+ :vartype display_name: str
+ :ivar experiment_name: The name of the experiment the job belongs to. If not set, the job is
+ placed in the "Default" experiment.
+ :vartype experiment_name: str
+ :ivar identity: Identity configuration. If set, this should be one of AmlToken,
+ ManagedIdentity, UserIdentity or null.
+ Defaults to AmlToken if null.
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
+ :ivar is_archived: Is the asset archived?.
+ :vartype is_archived: bool
+ :ivar job_type: [Required] Specifies the type of job. Required. Known values are: "AutoML",
+ "Command", "Labeling", "Sweep", "Pipeline", and "Spark".
+ :vartype job_type: str or ~azure.mgmt.machinelearningservices.models.JobType
+ :ivar notification_setting: Notification setting for the job.
+ :vartype notification_setting: ~azure.mgmt.machinelearningservices.models.NotificationSetting
+ :ivar secrets_configuration: Configuration for secrets to be made available during runtime.
+ :vartype secrets_configuration: dict[str,
+ ~azure.mgmt.machinelearningservices.models.SecretConfiguration]
+ :ivar services: List of JobEndpoints.
+ For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
+ :vartype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
+ :ivar status: Status of the job. Known values are: "NotStarted", "Starting", "Provisioning",
+ "Preparing", "Queued", "Running", "Finalizing", "CancelRequested", "Completed", "Failed",
+ "Canceled", "NotResponding", "Paused", "Unknown", and "Scheduled".
+ :vartype status: str or ~azure.mgmt.machinelearningservices.models.JobStatus
+ :ivar archives: Archive files used in the job.
+ :vartype archives: list[str]
+ :ivar args: Arguments for the job.
+ :vartype args: str
+ :ivar code_id: [Required] ARM resource ID of the code asset. Required.
+ :vartype code_id: str
+ :ivar conf: Spark configured properties.
+ :vartype conf: dict[str, str]
+ :ivar entry: [Required] The entry to execute on startup of the job. Required.
+ :vartype entry: ~azure.mgmt.machinelearningservices.models.SparkJobEntry
+ :ivar environment_id: The ARM resource ID of the Environment specification for the job.
+ :vartype environment_id: str
+ :ivar environment_variables: Environment variables included in the job.
+ :vartype environment_variables: dict[str, str]
+ :ivar files: Files used in the job.
+ :vartype files: list[str]
+ :ivar inputs: Mapping of input data bindings used in the job.
+ :vartype inputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
+ :ivar jars: Jar files used in the job.
+ :vartype jars: list[str]
+ :ivar outputs: Mapping of output data bindings used in the job.
+ :vartype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
+ :ivar py_files: Python files used in the job.
+ :vartype py_files: list[str]
+ :ivar queue_settings: Queue settings for the job.
+ :vartype queue_settings: ~azure.mgmt.machinelearningservices.models.QueueSettings
+ :ivar resources: Compute Resource configuration for the job.
+ :vartype resources: ~azure.mgmt.machinelearningservices.models.SparkResourceConfiguration
+ """
+
+ _validation = {
+ "job_type": {"required": True},
+ "status": {"readonly": True},
+ "code_id": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "entry": {"required": True},
+ }
+
+ _attribute_map = {
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "component_id": {"key": "componentId", "type": "str"},
+ "compute_id": {"key": "computeId", "type": "str"},
+ "display_name": {"key": "displayName", "type": "str"},
+ "experiment_name": {"key": "experimentName", "type": "str"},
+ "identity": {"key": "identity", "type": "IdentityConfiguration"},
+ "is_archived": {"key": "isArchived", "type": "bool"},
+ "job_type": {"key": "jobType", "type": "str"},
+ "notification_setting": {"key": "notificationSetting", "type": "NotificationSetting"},
+ "secrets_configuration": {"key": "secretsConfiguration", "type": "{SecretConfiguration}"},
+ "services": {"key": "services", "type": "{JobService}"},
+ "status": {"key": "status", "type": "str"},
+ "archives": {"key": "archives", "type": "[str]"},
+ "args": {"key": "args", "type": "str"},
+ "code_id": {"key": "codeId", "type": "str"},
+ "conf": {"key": "conf", "type": "{str}"},
+ "entry": {"key": "entry", "type": "SparkJobEntry"},
+ "environment_id": {"key": "environmentId", "type": "str"},
+ "environment_variables": {"key": "environmentVariables", "type": "{str}"},
+ "files": {"key": "files", "type": "[str]"},
+ "inputs": {"key": "inputs", "type": "{JobInput}"},
+ "jars": {"key": "jars", "type": "[str]"},
+ "outputs": {"key": "outputs", "type": "{JobOutput}"},
+ "py_files": {"key": "pyFiles", "type": "[str]"},
+ "queue_settings": {"key": "queueSettings", "type": "QueueSettings"},
+ "resources": {"key": "resources", "type": "SparkResourceConfiguration"},
+ }
+
+ def __init__( # pylint: disable=too-many-locals
+ self,
+ *,
+ code_id: str,
+ entry: "_models.SparkJobEntry",
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ component_id: Optional[str] = None,
+ compute_id: Optional[str] = None,
+ display_name: Optional[str] = None,
+ experiment_name: str = "Default",
+ identity: Optional["_models.IdentityConfiguration"] = None,
+ is_archived: bool = False,
+ notification_setting: Optional["_models.NotificationSetting"] = None,
+ secrets_configuration: Optional[Dict[str, "_models.SecretConfiguration"]] = None,
+ services: Optional[Dict[str, "_models.JobService"]] = None,
+ archives: Optional[List[str]] = None,
+ args: Optional[str] = None,
+ conf: Optional[Dict[str, str]] = None,
+ environment_id: Optional[str] = None,
+ environment_variables: Optional[Dict[str, str]] = None,
+ files: Optional[List[str]] = None,
+ inputs: Optional[Dict[str, "_models.JobInput"]] = None,
+ jars: Optional[List[str]] = None,
+ outputs: Optional[Dict[str, "_models.JobOutput"]] = None,
+ py_files: Optional[List[str]] = None,
+ queue_settings: Optional["_models.QueueSettings"] = None,
+ resources: Optional["_models.SparkResourceConfiguration"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword component_id: ARM resource ID of the component resource.
+ :paramtype component_id: str
+ :keyword compute_id: ARM resource ID of the compute resource.
+ :paramtype compute_id: str
+ :keyword display_name: Display name of job.
+ :paramtype display_name: str
+ :keyword experiment_name: The name of the experiment the job belongs to. If not set, the job is
+ placed in the "Default" experiment.
+ :paramtype experiment_name: str
+ :keyword identity: Identity configuration. If set, this should be one of AmlToken,
+ ManagedIdentity, UserIdentity or null.
+ Defaults to AmlToken if null.
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
+ :keyword is_archived: Is the asset archived?.
+ :paramtype is_archived: bool
+ :keyword notification_setting: Notification setting for the job.
+ :paramtype notification_setting: ~azure.mgmt.machinelearningservices.models.NotificationSetting
+ :keyword secrets_configuration: Configuration for secrets to be made available during runtime.
+ :paramtype secrets_configuration: dict[str,
+ ~azure.mgmt.machinelearningservices.models.SecretConfiguration]
+ :keyword services: List of JobEndpoints.
+ For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
+ :paramtype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
+ :keyword archives: Archive files used in the job.
+ :paramtype archives: list[str]
+ :keyword args: Arguments for the job.
+ :paramtype args: str
+ :keyword code_id: [Required] ARM resource ID of the code asset. Required.
+ :paramtype code_id: str
+ :keyword conf: Spark configured properties.
+ :paramtype conf: dict[str, str]
+ :keyword entry: [Required] The entry to execute on startup of the job. Required.
+ :paramtype entry: ~azure.mgmt.machinelearningservices.models.SparkJobEntry
+ :keyword environment_id: The ARM resource ID of the Environment specification for the job.
+ :paramtype environment_id: str
+ :keyword environment_variables: Environment variables included in the job.
+ :paramtype environment_variables: dict[str, str]
+ :keyword files: Files used in the job.
+ :paramtype files: list[str]
+ :keyword inputs: Mapping of input data bindings used in the job.
+ :paramtype inputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
+ :keyword jars: Jar files used in the job.
+ :paramtype jars: list[str]
+ :keyword outputs: Mapping of output data bindings used in the job.
+ :paramtype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
+ :keyword py_files: Python files used in the job.
+ :paramtype py_files: list[str]
+ :keyword queue_settings: Queue settings for the job.
+ :paramtype queue_settings: ~azure.mgmt.machinelearningservices.models.QueueSettings
+ :keyword resources: Compute Resource configuration for the job.
+ :paramtype resources: ~azure.mgmt.machinelearningservices.models.SparkResourceConfiguration
+ """
+ super().__init__(
+ description=description,
+ properties=properties,
+ tags=tags,
+ component_id=component_id,
+ compute_id=compute_id,
+ display_name=display_name,
+ experiment_name=experiment_name,
+ identity=identity,
+ is_archived=is_archived,
+ notification_setting=notification_setting,
+ secrets_configuration=secrets_configuration,
+ services=services,
+ **kwargs
+ )
+ self.job_type: str = "Spark"
+ self.archives = archives
+ self.args = args
+ self.code_id = code_id
+ self.conf = conf
+ self.entry = entry
+ self.environment_id = environment_id
+ self.environment_variables = environment_variables
+ self.files = files
+ self.inputs = inputs
+ self.jars = jars
+ self.outputs = outputs
+ self.py_files = py_files
+ self.queue_settings = queue_settings
+ self.resources = resources
+
+
+class SparkJobEntry(_serialization.Model):
+ """Spark job entry point definition.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ SparkJobPythonEntry, SparkJobScalaEntry
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar spark_job_entry_type: [Required] Type of the job's entry point. Required. Known values
+ are: "SparkJobPythonEntry" and "SparkJobScalaEntry".
+ :vartype spark_job_entry_type: str or
+ ~azure.mgmt.machinelearningservices.models.SparkJobEntryType
+ """
+
+ _validation = {
+ "spark_job_entry_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "spark_job_entry_type": {"key": "sparkJobEntryType", "type": "str"},
+ }
+
+ _subtype_map = {
+ "spark_job_entry_type": {
+ "SparkJobPythonEntry": "SparkJobPythonEntry",
+ "SparkJobScalaEntry": "SparkJobScalaEntry",
+ }
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.spark_job_entry_type: Optional[str] = None
+
+
+class SparkJobPythonEntry(SparkJobEntry):
+ """SparkJobPythonEntry.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar spark_job_entry_type: [Required] Type of the job's entry point. Required. Known values
+ are: "SparkJobPythonEntry" and "SparkJobScalaEntry".
+ :vartype spark_job_entry_type: str or
+ ~azure.mgmt.machinelearningservices.models.SparkJobEntryType
+ :ivar file: [Required] Relative python file path for job entry point. Required.
+ :vartype file: str
+ """
+
+ _validation = {
+ "spark_job_entry_type": {"required": True},
+ "file": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
+ _attribute_map = {
+ "spark_job_entry_type": {"key": "sparkJobEntryType", "type": "str"},
+ "file": {"key": "file", "type": "str"},
+ }
+
+ def __init__(self, *, file: str, **kwargs: Any) -> None:
+ """
+ :keyword file: [Required] Relative python file path for job entry point. Required.
+ :paramtype file: str
+ """
+ super().__init__(**kwargs)
+ self.spark_job_entry_type: str = "SparkJobPythonEntry"
+ self.file = file
+
+
+class SparkJobScalaEntry(SparkJobEntry):
+ """SparkJobScalaEntry.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar spark_job_entry_type: [Required] Type of the job's entry point. Required. Known values
+ are: "SparkJobPythonEntry" and "SparkJobScalaEntry".
+ :vartype spark_job_entry_type: str or
+ ~azure.mgmt.machinelearningservices.models.SparkJobEntryType
+ :ivar class_name: [Required] Scala class name used as entry point. Required.
+ :vartype class_name: str
+ """
+
+ _validation = {
+ "spark_job_entry_type": {"required": True},
+ "class_name": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
}
_attribute_map = {
- "name": {"key": "name", "type": "str"},
- "tier": {"key": "tier", "type": "str"},
+ "spark_job_entry_type": {"key": "sparkJobEntryType", "type": "str"},
+ "class_name": {"key": "className", "type": "str"},
}
- def __init__(self, *, name: str, tier: Optional[Union[str, "_models.SkuTier"]] = None, **kwargs: Any) -> None:
+ def __init__(self, *, class_name: str, **kwargs: Any) -> None:
"""
- :keyword name: [Required] The name of the SKU. Ex - P3. It is typically a letter+number code.
- Required.
- :paramtype name: str
- :keyword tier: This field is required to be implemented by the Resource Provider if the service
- has more than one tier, but is not required on a PUT. Known values are: "Free", "Basic",
- "Standard", and "Premium".
- :paramtype tier: str or ~azure.mgmt.machinelearningservices.models.SkuTier
+ :keyword class_name: [Required] Scala class name used as entry point. Required.
+ :paramtype class_name: str
"""
super().__init__(**kwargs)
- self.name = name
- self.tier = tier
+ self.spark_job_entry_type: str = "SparkJobScalaEntry"
+ self.class_name = class_name
+
+
+class SparkResourceConfiguration(_serialization.Model):
+ """SparkResourceConfiguration.
+
+ :ivar instance_type: Optional type of VM used as supported by the compute target.
+ :vartype instance_type: str
+ :ivar runtime_version: Version of spark runtime used for the job.
+ :vartype runtime_version: str
+ """
+
+ _attribute_map = {
+ "instance_type": {"key": "instanceType", "type": "str"},
+ "runtime_version": {"key": "runtimeVersion", "type": "str"},
+ }
+
+ def __init__(self, *, instance_type: Optional[str] = None, runtime_version: str = "3.1", **kwargs: Any) -> None:
+ """
+ :keyword instance_type: Optional type of VM used as supported by the compute target.
+ :paramtype instance_type: str
+ :keyword runtime_version: Version of spark runtime used for the job.
+ :paramtype runtime_version: str
+ """
+ super().__init__(**kwargs)
+ self.instance_type = instance_type
+ self.runtime_version = runtime_version
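Putting the new Spark models together, a hedged sketch of a SparkJob payload follows; the code asset ID, entry file, and VM size are placeholders, and only keywords visible in this diff are used.

    # Sketch: assembling a SparkJob from the new entry-point and resource models.
    # The code asset ID, compute settings, and entry file are placeholder values.
    from azure.mgmt.machinelearningservices.models import (
        SparkJob,
        SparkJobPythonEntry,
        SparkResourceConfiguration,
    )

    job = SparkJob(
        code_id="/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.MachineLearningServices/workspaces/<ws>/codes/my_code/versions/1",
        entry=SparkJobPythonEntry(file="main.py"),
        conf={"spark.driver.cores": "1", "spark.executor.instances": "2"},
        resources=SparkResourceConfiguration(
            instance_type="Standard_E4s_v3",
            runtime_version="3.3",
        ),
    )
    assert job.job_type == "Spark"  # fixed by the subclass constructor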
class SslConfiguration(_serialization.Model):
@@ -19516,6 +29359,130 @@ def __init__(
self.stack_meta_learner_type = stack_meta_learner_type
+class StaticInputData(MonitoringInputDataBase):
+ """Static input data definition.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar columns: Mapping of column names to special uses.
+ :vartype columns: dict[str, str]
+ :ivar data_context: The context metadata of the data source.
+ :vartype data_context: str
+ :ivar input_data_type: [Required] Specifies the type of signal to monitor. Required. Known
+ values are: "Static", "Rolling", and "Fixed".
+ :vartype input_data_type: str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringInputDataType
+ :ivar job_input_type: [Required] Specifies the type of job. Required. Known values are:
+ "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
+ "triton_model".
+ :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
+ :ivar uri: [Required] Input Asset URI. Required.
+ :vartype uri: str
+ :ivar preprocessing_component_id: The ARM resource ID of the component resource used to
+ preprocess the data.
+ :vartype preprocessing_component_id: str
+ :ivar window_end: [Required] The end date of the data window. Required.
+ :vartype window_end: ~datetime.datetime
+ :ivar window_start: [Required] The start date of the data window. Required.
+ :vartype window_start: ~datetime.datetime
+ """
+
+ _validation = {
+ "input_data_type": {"required": True},
+ "job_input_type": {"required": True},
+ "uri": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "window_end": {"required": True},
+ "window_start": {"required": True},
+ }
+
+ _attribute_map = {
+ "columns": {"key": "columns", "type": "{str}"},
+ "data_context": {"key": "dataContext", "type": "str"},
+ "input_data_type": {"key": "inputDataType", "type": "str"},
+ "job_input_type": {"key": "jobInputType", "type": "str"},
+ "uri": {"key": "uri", "type": "str"},
+ "preprocessing_component_id": {"key": "preprocessingComponentId", "type": "str"},
+ "window_end": {"key": "windowEnd", "type": "iso-8601"},
+ "window_start": {"key": "windowStart", "type": "iso-8601"},
+ }
+
+ def __init__(
+ self,
+ *,
+ job_input_type: Union[str, "_models.JobInputType"],
+ uri: str,
+ window_end: datetime.datetime,
+ window_start: datetime.datetime,
+ columns: Optional[Dict[str, str]] = None,
+ data_context: Optional[str] = None,
+ preprocessing_component_id: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword columns: Mapping of column names to special uses.
+ :paramtype columns: dict[str, str]
+ :keyword data_context: The context metadata of the data source.
+ :paramtype data_context: str
+ :keyword job_input_type: [Required] Specifies the type of job. Required. Known values are:
+ "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
+ "triton_model".
+ :paramtype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
+ :keyword uri: [Required] Input Asset URI. Required.
+ :paramtype uri: str
+ :keyword preprocessing_component_id: The ARM resource ID of the component resource used to
+ preprocess the data.
+ :paramtype preprocessing_component_id: str
+ :keyword window_end: [Required] The end date of the data window. Required.
+ :paramtype window_end: ~datetime.datetime
+ :keyword window_start: [Required] The start date of the data window. Required.
+ :paramtype window_start: ~datetime.datetime
+ """
+ super().__init__(columns=columns, data_context=data_context, job_input_type=job_input_type, uri=uri, **kwargs)
+ self.input_data_type: str = "Static"
+ self.preprocessing_component_id = preprocessing_component_id
+ self.window_end = window_end
+ self.window_start = window_start
+
+
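A hedged sketch of the static monitoring input defined above; the URI and the data-window bounds are placeholder values.

    # Sketch: StaticInputData pins the monitored data to a fixed time window.
    # The URI and window bounds below are placeholders.
    import datetime

    from azure.mgmt.machinelearningservices.models import StaticInputData

    input_data = StaticInputData(
        job_input_type="mltable",
        uri="azureml://datastores/workspaceblobstore/paths/monitoring/data",
        window_start=datetime.datetime(2023, 7, 1, tzinfo=datetime.timezone.utc),
        window_end=datetime.datetime(2023, 8, 1, tzinfo=datetime.timezone.utc),
    )
    assert input_data.input_data_type == "Static"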
+class StatusMessage(_serialization.Model):
+ """Active message associated with project.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar code: Service-defined message code.
+ :vartype code: str
+ :ivar created_date_time: Time in UTC at which the message was created.
+ :vartype created_date_time: ~datetime.datetime
+ :ivar level: Severity level of message. Known values are: "Error", "Information", and
+ "Warning".
+ :vartype level: str or ~azure.mgmt.machinelearningservices.models.StatusMessageLevel
+ :ivar message: A human-readable representation of the message code.
+ :vartype message: str
+ """
+
+ _validation = {
+ "code": {"readonly": True},
+ "created_date_time": {"readonly": True},
+ "level": {"readonly": True},
+ "message": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "code": {"key": "code", "type": "str"},
+ "created_date_time": {"key": "createdDateTime", "type": "iso-8601"},
+ "level": {"key": "level", "type": "str"},
+ "message": {"key": "message", "type": "str"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.code = None
+ self.created_date_time = None
+ self.level = None
+ self.message = None
+
+
class StorageAccountDetails(_serialization.Model):
"""Details of storage account to be used for the Registry.
@@ -19585,15 +29552,23 @@ class SweepJob(JobBaseProperties): # pylint: disable=too-many-instance-attribut
:ivar is_archived: Is the asset archived?.
:vartype is_archived: bool
:ivar job_type: [Required] Specifies the type of job. Required. Known values are: "AutoML",
- "Command", "Sweep", and "Pipeline".
+ "Command", "Labeling", "Sweep", "Pipeline", and "Spark".
:vartype job_type: str or ~azure.mgmt.machinelearningservices.models.JobType
+ :ivar notification_setting: Notification setting for the job.
+ :vartype notification_setting: ~azure.mgmt.machinelearningservices.models.NotificationSetting
+ :ivar secrets_configuration: Configuration for secrets to be made available during runtime.
+ :vartype secrets_configuration: dict[str,
+ ~azure.mgmt.machinelearningservices.models.SecretConfiguration]
:ivar services: List of JobEndpoints.
For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
:vartype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
:ivar status: Status of the job. Known values are: "NotStarted", "Starting", "Provisioning",
"Preparing", "Queued", "Running", "Finalizing", "CancelRequested", "Completed", "Failed",
- "Canceled", "NotResponding", "Paused", and "Unknown".
+ "Canceled", "NotResponding", "Paused", "Unknown", and "Scheduled".
:vartype status: str or ~azure.mgmt.machinelearningservices.models.JobStatus
+ :ivar component_configuration: Component Configuration for sweep over component.
+ :vartype component_configuration:
+ ~azure.mgmt.machinelearningservices.models.ComponentConfiguration
:ivar early_termination: Early termination policies enable canceling poor-performing runs
before they complete.
:vartype early_termination: ~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicy
@@ -19605,6 +29580,10 @@ class SweepJob(JobBaseProperties): # pylint: disable=too-many-instance-attribut
:vartype objective: ~azure.mgmt.machinelearningservices.models.Objective
:ivar outputs: Mapping of output data bindings used in the job.
:vartype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
+ :ivar queue_settings: Queue settings for the job.
+ :vartype queue_settings: ~azure.mgmt.machinelearningservices.models.QueueSettings
+ :ivar resources: Compute Resource configuration for the job.
+ :vartype resources: ~azure.mgmt.machinelearningservices.models.JobResourceConfiguration
:ivar sampling_algorithm: [Required] The hyperparameter sampling algorithm. Required.
:vartype sampling_algorithm: ~azure.mgmt.machinelearningservices.models.SamplingAlgorithm
:ivar search_space: [Required] A dictionary containing each parameter and its distribution. The
@@ -19634,19 +29613,24 @@ class SweepJob(JobBaseProperties): # pylint: disable=too-many-instance-attribut
"identity": {"key": "identity", "type": "IdentityConfiguration"},
"is_archived": {"key": "isArchived", "type": "bool"},
"job_type": {"key": "jobType", "type": "str"},
+ "notification_setting": {"key": "notificationSetting", "type": "NotificationSetting"},
+ "secrets_configuration": {"key": "secretsConfiguration", "type": "{SecretConfiguration}"},
"services": {"key": "services", "type": "{JobService}"},
"status": {"key": "status", "type": "str"},
+ "component_configuration": {"key": "componentConfiguration", "type": "ComponentConfiguration"},
"early_termination": {"key": "earlyTermination", "type": "EarlyTerminationPolicy"},
"inputs": {"key": "inputs", "type": "{JobInput}"},
"limits": {"key": "limits", "type": "SweepJobLimits"},
"objective": {"key": "objective", "type": "Objective"},
"outputs": {"key": "outputs", "type": "{JobOutput}"},
+ "queue_settings": {"key": "queueSettings", "type": "QueueSettings"},
+ "resources": {"key": "resources", "type": "JobResourceConfiguration"},
"sampling_algorithm": {"key": "samplingAlgorithm", "type": "SamplingAlgorithm"},
"search_space": {"key": "searchSpace", "type": "object"},
"trial": {"key": "trial", "type": "TrialComponent"},
}
- def __init__(
+ def __init__( # pylint: disable=too-many-locals
self,
*,
objective: "_models.Objective",
@@ -19662,11 +29646,16 @@ def __init__(
experiment_name: str = "Default",
identity: Optional["_models.IdentityConfiguration"] = None,
is_archived: bool = False,
+ notification_setting: Optional["_models.NotificationSetting"] = None,
+ secrets_configuration: Optional[Dict[str, "_models.SecretConfiguration"]] = None,
services: Optional[Dict[str, "_models.JobService"]] = None,
+ component_configuration: Optional["_models.ComponentConfiguration"] = None,
early_termination: Optional["_models.EarlyTerminationPolicy"] = None,
inputs: Optional[Dict[str, "_models.JobInput"]] = None,
limits: Optional["_models.SweepJobLimits"] = None,
outputs: Optional[Dict[str, "_models.JobOutput"]] = None,
+ queue_settings: Optional["_models.QueueSettings"] = None,
+ resources: Optional["_models.JobResourceConfiguration"] = None,
**kwargs: Any
) -> None:
"""
@@ -19691,9 +29680,17 @@ def __init__(
:paramtype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
:keyword is_archived: Is the asset archived?.
:paramtype is_archived: bool
+ :keyword notification_setting: Notification setting for the job.
+ :paramtype notification_setting: ~azure.mgmt.machinelearningservices.models.NotificationSetting
+ :keyword secrets_configuration: Configuration for secrets to be made available during runtime.
+ :paramtype secrets_configuration: dict[str,
+ ~azure.mgmt.machinelearningservices.models.SecretConfiguration]
:keyword services: List of JobEndpoints.
For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
:paramtype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
+ :keyword component_configuration: Component Configuration for sweep over component.
+ :paramtype component_configuration:
+ ~azure.mgmt.machinelearningservices.models.ComponentConfiguration
:keyword early_termination: Early termination policies enable canceling poor-performing runs
before they complete.
:paramtype early_termination: ~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicy
@@ -19705,6 +29702,10 @@ def __init__(
:paramtype objective: ~azure.mgmt.machinelearningservices.models.Objective
:keyword outputs: Mapping of output data bindings used in the job.
:paramtype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
+ :keyword queue_settings: Queue settings for the job.
+ :paramtype queue_settings: ~azure.mgmt.machinelearningservices.models.QueueSettings
+ :keyword resources: Compute Resource configuration for the job.
+ :paramtype resources: ~azure.mgmt.machinelearningservices.models.JobResourceConfiguration
:keyword sampling_algorithm: [Required] The hyperparameter sampling algorithm. Required.
:paramtype sampling_algorithm: ~azure.mgmt.machinelearningservices.models.SamplingAlgorithm
:keyword search_space: [Required] A dictionary containing each parameter and its distribution.
@@ -19723,15 +29724,20 @@ def __init__(
experiment_name=experiment_name,
identity=identity,
is_archived=is_archived,
+ notification_setting=notification_setting,
+ secrets_configuration=secrets_configuration,
services=services,
**kwargs
)
self.job_type: str = "Sweep"
+ self.component_configuration = component_configuration
self.early_termination = early_termination
self.inputs = inputs
self.limits = limits
self.objective = objective
self.outputs = outputs
+ self.queue_settings = queue_settings
+ self.resources = resources
self.sampling_algorithm = sampling_algorithm
self.search_space = search_space
self.trial = trial
@@ -20186,6 +30192,372 @@ def __init__(self, **kwargs: Any) -> None:
self.version = None
+class TableFixedParameters(_serialization.Model): # pylint: disable=too-many-instance-attributes
+ """Fixed training parameters that won't be swept over during AutoML Table training.
+
+ :ivar booster: Specify the boosting type, e.g. gbdt for XGBoost.
+ :vartype booster: str
+ :ivar boosting_type: Specify the boosting type, e.g. gbdt for LightGBM.
+ :vartype boosting_type: str
+ :ivar grow_policy: Specify the grow policy, which controls the way new nodes are added to the
+ tree.
+ :vartype grow_policy: str
+ :ivar learning_rate: The learning rate for the training procedure.
+ :vartype learning_rate: float
+ :ivar max_bin: Specify the maximum number of discrete bins to bucket continuous features.
+ :vartype max_bin: int
+ :ivar max_depth: Specify the max depth to limit the tree depth explicitly.
+ :vartype max_depth: int
+ :ivar max_leaves: Specify the max leaves to limit the tree leaves explicitly.
+ :vartype max_leaves: int
+ :ivar min_data_in_leaf: The minimum number of data per leaf.
+ :vartype min_data_in_leaf: int
+ :ivar min_split_gain: Minimum loss reduction required to make a further partition on a leaf
+ node of the tree.
+ :vartype min_split_gain: float
+ :ivar model_name: The name of the model to train.
+ :vartype model_name: str
+ :ivar n_estimators: Specify the number of trees (or rounds) in a model.
+ :vartype n_estimators: int
+ :ivar num_leaves: Specify the number of leaves.
+ :vartype num_leaves: int
+ :ivar preprocessor_name: The name of the preprocessor to use.
+ :vartype preprocessor_name: str
+ :ivar reg_alpha: L1 regularization term on weights.
+ :vartype reg_alpha: float
+ :ivar reg_lambda: L2 regularization term on weights.
+ :vartype reg_lambda: float
+ :ivar subsample: Subsample ratio of the training instance.
+ :vartype subsample: float
+ :ivar subsample_freq: Frequency of subsample.
+ :vartype subsample_freq: float
+ :ivar tree_method: Specify the tree method.
+ :vartype tree_method: str
+ :ivar with_mean: If true, center before scaling the data with StandardScaler.
+ :vartype with_mean: bool
+ :ivar with_std: If true, scale the data to unit variance with StandardScaler.
+ :vartype with_std: bool
+ """
+
+ _attribute_map = {
+ "booster": {"key": "booster", "type": "str"},
+ "boosting_type": {"key": "boostingType", "type": "str"},
+ "grow_policy": {"key": "growPolicy", "type": "str"},
+ "learning_rate": {"key": "learningRate", "type": "float"},
+ "max_bin": {"key": "maxBin", "type": "int"},
+ "max_depth": {"key": "maxDepth", "type": "int"},
+ "max_leaves": {"key": "maxLeaves", "type": "int"},
+ "min_data_in_leaf": {"key": "minDataInLeaf", "type": "int"},
+ "min_split_gain": {"key": "minSplitGain", "type": "float"},
+ "model_name": {"key": "modelName", "type": "str"},
+ "n_estimators": {"key": "nEstimators", "type": "int"},
+ "num_leaves": {"key": "numLeaves", "type": "int"},
+ "preprocessor_name": {"key": "preprocessorName", "type": "str"},
+ "reg_alpha": {"key": "regAlpha", "type": "float"},
+ "reg_lambda": {"key": "regLambda", "type": "float"},
+ "subsample": {"key": "subsample", "type": "float"},
+ "subsample_freq": {"key": "subsampleFreq", "type": "float"},
+ "tree_method": {"key": "treeMethod", "type": "str"},
+ "with_mean": {"key": "withMean", "type": "bool"},
+ "with_std": {"key": "withStd", "type": "bool"},
+ }
+
+ def __init__(
+ self,
+ *,
+ booster: Optional[str] = None,
+ boosting_type: Optional[str] = None,
+ grow_policy: Optional[str] = None,
+ learning_rate: Optional[float] = None,
+ max_bin: Optional[int] = None,
+ max_depth: Optional[int] = None,
+ max_leaves: Optional[int] = None,
+ min_data_in_leaf: Optional[int] = None,
+ min_split_gain: Optional[float] = None,
+ model_name: Optional[str] = None,
+ n_estimators: Optional[int] = None,
+ num_leaves: Optional[int] = None,
+ preprocessor_name: Optional[str] = None,
+ reg_alpha: Optional[float] = None,
+ reg_lambda: Optional[float] = None,
+ subsample: Optional[float] = None,
+ subsample_freq: Optional[float] = None,
+ tree_method: Optional[str] = None,
+ with_mean: bool = False,
+ with_std: bool = False,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword booster: Specify the boosting type, e.g. gbdt for XGBoost.
+ :paramtype booster: str
+ :keyword boosting_type: Specify the boosting type, e.g. gbdt for LightGBM.
+ :paramtype boosting_type: str
+ :keyword grow_policy: Specify the grow policy, which controls the way new nodes are added to
+ the tree.
+ :paramtype grow_policy: str
+ :keyword learning_rate: The learning rate for the training procedure.
+ :paramtype learning_rate: float
+ :keyword max_bin: Specify the maximum number of discrete bins to bucket continuous features.
+ :paramtype max_bin: int
+ :keyword max_depth: Specify the max depth to limit the tree depth explicitly.
+ :paramtype max_depth: int
+ :keyword max_leaves: Specify the max leaves to limit the tree leaves explicitly.
+ :paramtype max_leaves: int
+ :keyword min_data_in_leaf: The minimum number of data per leaf.
+ :paramtype min_data_in_leaf: int
+ :keyword min_split_gain: Minimum loss reduction required to make a further partition on a leaf
+ node of the tree.
+ :paramtype min_split_gain: float
+ :keyword model_name: The name of the model to train.
+ :paramtype model_name: str
+ :keyword n_estimators: Specify the number of trees (or rounds) in a model.
+ :paramtype n_estimators: int
+ :keyword num_leaves: Specify the number of leaves.
+ :paramtype num_leaves: int
+ :keyword preprocessor_name: The name of the preprocessor to use.
+ :paramtype preprocessor_name: str
+ :keyword reg_alpha: L1 regularization term on weights.
+ :paramtype reg_alpha: float
+ :keyword reg_lambda: L2 regularization term on weights.
+ :paramtype reg_lambda: float
+ :keyword subsample: Subsample ratio of the training instance.
+ :paramtype subsample: float
+ :keyword subsample_freq: Frequency of subsample.
+ :paramtype subsample_freq: float
+ :keyword tree_method: Specify the tree method.
+ :paramtype tree_method: str
+ :keyword with_mean: If true, center before scaling the data with StandardScaler.
+ :paramtype with_mean: bool
+ :keyword with_std: If true, scale the data to unit variance with StandardScaler.
+ :paramtype with_std: bool
+ """
+ super().__init__(**kwargs)
+ self.booster = booster
+ self.boosting_type = boosting_type
+ self.grow_policy = grow_policy
+ self.learning_rate = learning_rate
+ self.max_bin = max_bin
+ self.max_depth = max_depth
+ self.max_leaves = max_leaves
+ self.min_data_in_leaf = min_data_in_leaf
+ self.min_split_gain = min_split_gain
+ self.model_name = model_name
+ self.n_estimators = n_estimators
+ self.num_leaves = num_leaves
+ self.preprocessor_name = preprocessor_name
+ self.reg_alpha = reg_alpha
+ self.reg_lambda = reg_lambda
+ self.subsample = subsample
+ self.subsample_freq = subsample_freq
+ self.tree_method = tree_method
+ self.with_mean = with_mean
+ self.with_std = with_std
+
+
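Every field on the model above is optional; a hedged sketch with a handful of illustrative values follows.

    # Sketch: pinning a few AutoML table training parameters. All keywords are
    # optional; the values chosen here are purely illustrative.
    from azure.mgmt.machinelearningservices.models import TableFixedParameters

    fixed = TableFixedParameters(
        model_name="LightGBM",
        boosting_type="gbdt",
        learning_rate=0.05,
        n_estimators=200,
        num_leaves=31,
    )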
+class TableParameterSubspace(_serialization.Model): # pylint: disable=too-many-instance-attributes
+ """TableParameterSubspace.
+
+ :ivar booster: Specify the boosting type, e.g. gbdt for XGBoost.
+ :vartype booster: str
+ :ivar boosting_type: Specify the boosting type, e.g. gbdt for LightGBM.
+ :vartype boosting_type: str
+ :ivar grow_policy: Specify the grow policy, which controls the way new nodes are added to the
+ tree.
+ :vartype grow_policy: str
+ :ivar learning_rate: The learning rate for the training procedure.
+ :vartype learning_rate: str
+ :ivar max_bin: Specify the maximum number of discrete bins to bucket continuous features.
+ :vartype max_bin: str
+ :ivar max_depth: Specify the max depth to limit the tree depth explicitly.
+ :vartype max_depth: str
+ :ivar max_leaves: Specify the max leaves to limit the tree leaves explicitly.
+ :vartype max_leaves: str
+ :ivar min_data_in_leaf: The minimum number of data per leaf.
+ :vartype min_data_in_leaf: str
+ :ivar min_split_gain: Minimum loss reduction required to make a further partition on a leaf
+ node of the tree.
+ :vartype min_split_gain: str
+ :ivar model_name: The name of the model to train.
+ :vartype model_name: str
+ :ivar n_estimators: Specify the number of trees (or rounds) in a model.
+ :vartype n_estimators: str
+ :ivar num_leaves: Specify the number of leaves.
+ :vartype num_leaves: str
+ :ivar preprocessor_name: The name of the preprocessor to use.
+ :vartype preprocessor_name: str
+ :ivar reg_alpha: L1 regularization term on weights.
+ :vartype reg_alpha: str
+ :ivar reg_lambda: L2 regularization term on weights.
+ :vartype reg_lambda: str
+ :ivar subsample: Subsample ratio of the training instance.
+ :vartype subsample: str
+ :ivar subsample_freq: Frequency of subsample.
+ :vartype subsample_freq: str
+ :ivar tree_method: Specify the tree method.
+ :vartype tree_method: str
+ :ivar with_mean: If true, center before scaling the data with StandardScaler.
+ :vartype with_mean: str
+ :ivar with_std: If true, scale the data to unit variance with StandardScaler.
+ :vartype with_std: str
+ """
+
+ _attribute_map = {
+ "booster": {"key": "booster", "type": "str"},
+ "boosting_type": {"key": "boostingType", "type": "str"},
+ "grow_policy": {"key": "growPolicy", "type": "str"},
+ "learning_rate": {"key": "learningRate", "type": "str"},
+ "max_bin": {"key": "maxBin", "type": "str"},
+ "max_depth": {"key": "maxDepth", "type": "str"},
+ "max_leaves": {"key": "maxLeaves", "type": "str"},
+ "min_data_in_leaf": {"key": "minDataInLeaf", "type": "str"},
+ "min_split_gain": {"key": "minSplitGain", "type": "str"},
+ "model_name": {"key": "modelName", "type": "str"},
+ "n_estimators": {"key": "nEstimators", "type": "str"},
+ "num_leaves": {"key": "numLeaves", "type": "str"},
+ "preprocessor_name": {"key": "preprocessorName", "type": "str"},
+ "reg_alpha": {"key": "regAlpha", "type": "str"},
+ "reg_lambda": {"key": "regLambda", "type": "str"},
+ "subsample": {"key": "subsample", "type": "str"},
+ "subsample_freq": {"key": "subsampleFreq", "type": "str"},
+ "tree_method": {"key": "treeMethod", "type": "str"},
+ "with_mean": {"key": "withMean", "type": "str"},
+ "with_std": {"key": "withStd", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ booster: Optional[str] = None,
+ boosting_type: Optional[str] = None,
+ grow_policy: Optional[str] = None,
+ learning_rate: Optional[str] = None,
+ max_bin: Optional[str] = None,
+ max_depth: Optional[str] = None,
+ max_leaves: Optional[str] = None,
+ min_data_in_leaf: Optional[str] = None,
+ min_split_gain: Optional[str] = None,
+ model_name: Optional[str] = None,
+ n_estimators: Optional[str] = None,
+ num_leaves: Optional[str] = None,
+ preprocessor_name: Optional[str] = None,
+ reg_alpha: Optional[str] = None,
+ reg_lambda: Optional[str] = None,
+ subsample: Optional[str] = None,
+ subsample_freq: Optional[str] = None,
+ tree_method: Optional[str] = None,
+ with_mean: Optional[str] = None,
+ with_std: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword booster: Specify the boosting type, e.g. gbdt for XGBoost.
+ :paramtype booster: str
+ :keyword boosting_type: Specify the boosting type, e.g. gbdt for LightGBM.
+ :paramtype boosting_type: str
+ :keyword grow_policy: Specify the grow policy, which controls the way new nodes are added to
+ the tree.
+ :paramtype grow_policy: str
+ :keyword learning_rate: The learning rate for the training procedure.
+ :paramtype learning_rate: str
+ :keyword max_bin: Specify the maximum number of discrete bins to bucket continuous features.
+ :paramtype max_bin: str
+ :keyword max_depth: Specify the max depth to limit the tree depth explicitly.
+ :paramtype max_depth: str
+ :keyword max_leaves: Specify the max leaves to limit the tree leaves explicitly.
+ :paramtype max_leaves: str
+ :keyword min_data_in_leaf: The minimum number of data per leaf.
+ :paramtype min_data_in_leaf: str
+ :keyword min_split_gain: Minimum loss reduction required to make a further partition on a leaf
+ node of the tree.
+ :paramtype min_split_gain: str
+ :keyword model_name: The name of the model to train.
+ :paramtype model_name: str
+ :keyword n_estimators: Specify the number of trees (or rounds) in a model.
+ :paramtype n_estimators: str
+ :keyword num_leaves: Specify the number of leaves.
+ :paramtype num_leaves: str
+ :keyword preprocessor_name: The name of the preprocessor to use.
+ :paramtype preprocessor_name: str
+ :keyword reg_alpha: L1 regularization term on weights.
+ :paramtype reg_alpha: str
+ :keyword reg_lambda: L2 regularization term on weights.
+ :paramtype reg_lambda: str
+ :keyword subsample: Subsample ratio of the training instance.
+ :paramtype subsample: str
+ :keyword subsample_freq: Frequency of subsample.
+ :paramtype subsample_freq: str
+ :keyword tree_method: Specify the tree method.
+ :paramtype tree_method: str
+ :keyword with_mean: If true, center the data before scaling with StandardScaler.
+ :paramtype with_mean: str
+ :keyword with_std: If true, scale the data to unit variance with StandardScaler.
+ :paramtype with_std: str
+ """
+ super().__init__(**kwargs)
+ self.booster = booster
+ self.boosting_type = boosting_type
+ self.grow_policy = grow_policy
+ self.learning_rate = learning_rate
+ self.max_bin = max_bin
+ self.max_depth = max_depth
+ self.max_leaves = max_leaves
+ self.min_data_in_leaf = min_data_in_leaf
+ self.min_split_gain = min_split_gain
+ self.model_name = model_name
+ self.n_estimators = n_estimators
+ self.num_leaves = num_leaves
+ self.preprocessor_name = preprocessor_name
+ self.reg_alpha = reg_alpha
+ self.reg_lambda = reg_lambda
+ self.subsample = subsample
+ self.subsample_freq = subsample_freq
+ self.tree_method = tree_method
+ self.with_mean = with_mean
+ self.with_std = with_std
+
+
+class TableSweepSettings(_serialization.Model):
+ """TableSweepSettings.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar early_termination: Type of early termination policy for the sweeping job.
+ :vartype early_termination: ~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicy
+ :ivar sampling_algorithm: [Required] Type of sampling algorithm. Required. Known values are:
+ "Grid", "Random", and "Bayesian".
+ :vartype sampling_algorithm: str or
+ ~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
+ """
+
+ _validation = {
+ "sampling_algorithm": {"required": True},
+ }
+
+ _attribute_map = {
+ "early_termination": {"key": "earlyTermination", "type": "EarlyTerminationPolicy"},
+ "sampling_algorithm": {"key": "samplingAlgorithm", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ sampling_algorithm: Union[str, "_models.SamplingAlgorithmType"],
+ early_termination: Optional["_models.EarlyTerminationPolicy"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword early_termination: Type of early termination policy for the sweeping job.
+ :paramtype early_termination: ~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicy
+ :keyword sampling_algorithm: [Required] Type of sampling algorithm. Required. Known values are:
+ "Grid", "Random", and "Bayesian".
+ :paramtype sampling_algorithm: str or
+ ~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
+ """
+ super().__init__(**kwargs)
+ self.early_termination = early_termination
+ self.sampling_algorithm = sampling_algorithm
+
+
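For orientation, a minimal usage sketch (not part of the generated diff) of the new TableSweepSettings model; BanditPolicy is assumed to be one of the EarlyTerminationPolicy subtypes exposed by this package:

from azure.mgmt.machinelearningservices import models

# Sweep tabular AutoML models with random sampling; early termination via an
# assumed BanditPolicy (a subtype of EarlyTerminationPolicy in this SDK).
sweep_settings = models.TableSweepSettings(
    sampling_algorithm="Random",  # str or models.SamplingAlgorithmType
    early_termination=models.BanditPolicy(evaluation_interval=1, slack_factor=0.2),
)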
class TableVerticalFeaturizationSettings(FeaturizationSettings):
"""Featurization Configuration.
@@ -20275,8 +30647,14 @@ class TableVerticalLimitSettings(_serialization.Model):
:vartype max_concurrent_trials: int
:ivar max_cores_per_trial: Max cores per iteration.
:vartype max_cores_per_trial: int
+ :ivar max_nodes: Maximum nodes to use for the experiment.
+ :vartype max_nodes: int
:ivar max_trials: Number of iterations.
:vartype max_trials: int
+ :ivar sweep_concurrent_trials: Number of concurrent sweeping runs that the user wants to trigger.
+ :vartype sweep_concurrent_trials: int
+ :ivar sweep_trials: Number of sweeping runs that the user wants to trigger.
+ :vartype sweep_trials: int
:ivar timeout: AutoML job timeout.
:vartype timeout: ~datetime.timedelta
:ivar trial_timeout: Iteration timeout.
@@ -20288,7 +30666,10 @@ class TableVerticalLimitSettings(_serialization.Model):
"exit_score": {"key": "exitScore", "type": "float"},
"max_concurrent_trials": {"key": "maxConcurrentTrials", "type": "int"},
"max_cores_per_trial": {"key": "maxCoresPerTrial", "type": "int"},
+ "max_nodes": {"key": "maxNodes", "type": "int"},
"max_trials": {"key": "maxTrials", "type": "int"},
+ "sweep_concurrent_trials": {"key": "sweepConcurrentTrials", "type": "int"},
+ "sweep_trials": {"key": "sweepTrials", "type": "int"},
"timeout": {"key": "timeout", "type": "duration"},
"trial_timeout": {"key": "trialTimeout", "type": "duration"},
}
@@ -20300,7 +30681,10 @@ def __init__(
exit_score: Optional[float] = None,
max_concurrent_trials: int = 1,
max_cores_per_trial: int = -1,
+ max_nodes: int = 1,
max_trials: int = 1000,
+ sweep_concurrent_trials: int = 0,
+ sweep_trials: int = 0,
timeout: datetime.timedelta = "PT6H",
trial_timeout: datetime.timedelta = "PT30M",
**kwargs: Any
@@ -20315,8 +30699,15 @@ def __init__(
:paramtype max_concurrent_trials: int
:keyword max_cores_per_trial: Max cores per iteration.
:paramtype max_cores_per_trial: int
+ :keyword max_nodes: Maximum nodes to use for the experiment.
+ :paramtype max_nodes: int
:keyword max_trials: Number of iterations.
:paramtype max_trials: int
+ :keyword sweep_concurrent_trials: Number of concurrent sweeping runs that the user wants to
+ trigger.
+ :paramtype sweep_concurrent_trials: int
+ :keyword sweep_trials: Number of sweeping runs that the user wants to trigger.
+ :paramtype sweep_trials: int
:keyword timeout: AutoML job timeout.
:paramtype timeout: ~datetime.timedelta
:keyword trial_timeout: Iteration timeout.
@@ -20327,7 +30718,10 @@ def __init__(
self.exit_score = exit_score
self.max_concurrent_trials = max_concurrent_trials
self.max_cores_per_trial = max_cores_per_trial
+ self.max_nodes = max_nodes
self.max_trials = max_trials
+ self.sweep_concurrent_trials = sweep_concurrent_trials
+ self.sweep_trials = sweep_trials
self.timeout = timeout
self.trial_timeout = trial_timeout
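The widened TableVerticalLimitSettings can cap both the trial budget and the new sweep budget; a hedged sketch assuming only the keyword names shown above:

import datetime

from azure.mgmt.machinelearningservices import models

# Limit settings for a tabular AutoML job, exercising the new max_nodes and sweep counters.
limits = models.TableVerticalLimitSettings(
    max_concurrent_trials=4,
    max_nodes=4,                # new: maximum nodes for the experiment
    max_trials=100,
    sweep_concurrent_trials=2,  # new: concurrent sweeping runs
    sweep_trials=20,            # new: total sweeping runs
    timeout=datetime.timedelta(hours=6),
    trial_timeout=datetime.timedelta(minutes=30),
)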
@@ -20399,7 +30793,7 @@ class TensorFlow(DistributionConfiguration):
All required parameters must be populated in order to send to Azure.
:ivar distribution_type: [Required] Specifies the type of distribution framework. Required.
- Known values are: "PyTorch", "TensorFlow", and "Mpi".
+ Known values are: "PyTorch", "TensorFlow", "Mpi", and "Ray".
:vartype distribution_type: str or ~azure.mgmt.machinelearningservices.models.DistributionType
:ivar parameter_server_count: Number of parameter server tasks.
:vartype parameter_server_count: int
@@ -20430,7 +30824,7 @@ def __init__(self, *, parameter_server_count: int = 0, worker_count: Optional[in
self.worker_count = worker_count
-class TextClassification(NlpVertical, AutoMLVertical):
+class TextClassification(NlpVertical, AutoMLVertical): # pylint: disable=too-many-instance-attributes
"""Text Classification task in AutoML NLP vertical.
NLP - Natural Language Processing.
@@ -20452,8 +30846,16 @@ class TextClassification(NlpVertical, AutoMLVertical):
:ivar featurization_settings: Featurization inputs needed for AutoML job.
:vartype featurization_settings:
~azure.mgmt.machinelearningservices.models.NlpVerticalFeaturizationSettings
+ :ivar fixed_parameters: Model/training parameters that will remain constant throughout
+ training.
+ :vartype fixed_parameters: ~azure.mgmt.machinelearningservices.models.NlpFixedParameters
:ivar limit_settings: Execution constraints for AutoMLJob.
:vartype limit_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalLimitSettings
+ :ivar search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :vartype search_space: list[~azure.mgmt.machinelearningservices.models.NlpParameterSubspace]
+ :ivar sweep_settings: Settings for model sweeping and hyperparameter tuning.
+ :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.NlpSweepSettings
:ivar validation_data: Validation data inputs.
:vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
:ivar primary_metric: Primary metric for Text-Classification task. Known values are:
@@ -20474,7 +30876,10 @@ class TextClassification(NlpVertical, AutoMLVertical):
"task_type": {"key": "taskType", "type": "str"},
"training_data": {"key": "trainingData", "type": "MLTableJobInput"},
"featurization_settings": {"key": "featurizationSettings", "type": "NlpVerticalFeaturizationSettings"},
+ "fixed_parameters": {"key": "fixedParameters", "type": "NlpFixedParameters"},
"limit_settings": {"key": "limitSettings", "type": "NlpVerticalLimitSettings"},
+ "search_space": {"key": "searchSpace", "type": "[NlpParameterSubspace]"},
+ "sweep_settings": {"key": "sweepSettings", "type": "NlpSweepSettings"},
"validation_data": {"key": "validationData", "type": "MLTableJobInput"},
"primary_metric": {"key": "primaryMetric", "type": "str"},
}
@@ -20486,7 +30891,10 @@ def __init__(
log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
target_column_name: Optional[str] = None,
featurization_settings: Optional["_models.NlpVerticalFeaturizationSettings"] = None,
+ fixed_parameters: Optional["_models.NlpFixedParameters"] = None,
limit_settings: Optional["_models.NlpVerticalLimitSettings"] = None,
+ search_space: Optional[List["_models.NlpParameterSubspace"]] = None,
+ sweep_settings: Optional["_models.NlpSweepSettings"] = None,
validation_data: Optional["_models.MLTableJobInput"] = None,
primary_metric: Optional[Union[str, "_models.ClassificationPrimaryMetrics"]] = None,
**kwargs: Any
@@ -20503,8 +30911,16 @@ def __init__(
:keyword featurization_settings: Featurization inputs needed for AutoML job.
:paramtype featurization_settings:
~azure.mgmt.machinelearningservices.models.NlpVerticalFeaturizationSettings
+ :keyword fixed_parameters: Model/training parameters that will remain constant throughout
+ training.
+ :paramtype fixed_parameters: ~azure.mgmt.machinelearningservices.models.NlpFixedParameters
:keyword limit_settings: Execution constraints for AutoMLJob.
:paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalLimitSettings
+ :keyword search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :paramtype search_space: list[~azure.mgmt.machinelearningservices.models.NlpParameterSubspace]
+ :keyword sweep_settings: Settings for model sweeping and hyperparameter tuning.
+ :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.NlpSweepSettings
:keyword validation_data: Validation data inputs.
:paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
:keyword primary_metric: Primary metric for Text-Classification task. Known values are:
@@ -20515,7 +30931,10 @@ def __init__(
"""
super().__init__(
featurization_settings=featurization_settings,
+ fixed_parameters=fixed_parameters,
limit_settings=limit_settings,
+ search_space=search_space,
+ sweep_settings=sweep_settings,
validation_data=validation_data,
log_verbosity=log_verbosity,
target_column_name=target_column_name,
@@ -20528,11 +30947,14 @@ def __init__(
self.training_data = training_data
self.primary_metric = primary_metric
self.featurization_settings = featurization_settings
+ self.fixed_parameters = fixed_parameters
self.limit_settings = limit_settings
+ self.search_space = search_space
+ self.sweep_settings = sweep_settings
self.validation_data = validation_data
-class TextClassificationMultilabel(NlpVertical, AutoMLVertical):
+class TextClassificationMultilabel(NlpVertical, AutoMLVertical): # pylint: disable=too-many-instance-attributes
"""Text Classification Multilabel task in AutoML NLP vertical.
NLP - Natural Language Processing.
@@ -20556,8 +30978,16 @@ class TextClassificationMultilabel(NlpVertical, AutoMLVertical):
:ivar featurization_settings: Featurization inputs needed for AutoML job.
:vartype featurization_settings:
~azure.mgmt.machinelearningservices.models.NlpVerticalFeaturizationSettings
+ :ivar fixed_parameters: Model/training parameters that will remain constant throughout
+ training.
+ :vartype fixed_parameters: ~azure.mgmt.machinelearningservices.models.NlpFixedParameters
:ivar limit_settings: Execution constraints for AutoMLJob.
:vartype limit_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalLimitSettings
+ :ivar search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :vartype search_space: list[~azure.mgmt.machinelearningservices.models.NlpParameterSubspace]
+ :ivar sweep_settings: Settings for model sweeping and hyperparameter tuning.
+ :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.NlpSweepSettings
:ivar validation_data: Validation data inputs.
:vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
:ivar primary_metric: Primary metric for Text-Classification-Multilabel task.
@@ -20580,7 +31010,10 @@ class TextClassificationMultilabel(NlpVertical, AutoMLVertical):
"task_type": {"key": "taskType", "type": "str"},
"training_data": {"key": "trainingData", "type": "MLTableJobInput"},
"featurization_settings": {"key": "featurizationSettings", "type": "NlpVerticalFeaturizationSettings"},
+ "fixed_parameters": {"key": "fixedParameters", "type": "NlpFixedParameters"},
"limit_settings": {"key": "limitSettings", "type": "NlpVerticalLimitSettings"},
+ "search_space": {"key": "searchSpace", "type": "[NlpParameterSubspace]"},
+ "sweep_settings": {"key": "sweepSettings", "type": "NlpSweepSettings"},
"validation_data": {"key": "validationData", "type": "MLTableJobInput"},
"primary_metric": {"key": "primaryMetric", "type": "str"},
}
@@ -20592,7 +31025,10 @@ def __init__(
log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
target_column_name: Optional[str] = None,
featurization_settings: Optional["_models.NlpVerticalFeaturizationSettings"] = None,
+ fixed_parameters: Optional["_models.NlpFixedParameters"] = None,
limit_settings: Optional["_models.NlpVerticalLimitSettings"] = None,
+ search_space: Optional[List["_models.NlpParameterSubspace"]] = None,
+ sweep_settings: Optional["_models.NlpSweepSettings"] = None,
validation_data: Optional["_models.MLTableJobInput"] = None,
**kwargs: Any
) -> None:
@@ -20608,14 +31044,25 @@ def __init__(
:keyword featurization_settings: Featurization inputs needed for AutoML job.
:paramtype featurization_settings:
~azure.mgmt.machinelearningservices.models.NlpVerticalFeaturizationSettings
+ :keyword fixed_parameters: Model/training parameters that will remain constant throughout
+ training.
+ :paramtype fixed_parameters: ~azure.mgmt.machinelearningservices.models.NlpFixedParameters
:keyword limit_settings: Execution constraints for AutoMLJob.
:paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalLimitSettings
+ :keyword search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :paramtype search_space: list[~azure.mgmt.machinelearningservices.models.NlpParameterSubspace]
+ :keyword sweep_settings: Settings for model sweeping and hyperparameter tuning.
+ :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.NlpSweepSettings
:keyword validation_data: Validation data inputs.
:paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
"""
super().__init__(
featurization_settings=featurization_settings,
+ fixed_parameters=fixed_parameters,
limit_settings=limit_settings,
+ search_space=search_space,
+ sweep_settings=sweep_settings,
validation_data=validation_data,
log_verbosity=log_verbosity,
target_column_name=target_column_name,
@@ -20628,11 +31075,14 @@ def __init__(
self.training_data = training_data
self.primary_metric = None
self.featurization_settings = featurization_settings
+ self.fixed_parameters = fixed_parameters
self.limit_settings = limit_settings
+ self.search_space = search_space
+ self.sweep_settings = sweep_settings
self.validation_data = validation_data
-class TextNer(NlpVertical, AutoMLVertical):
+class TextNer(NlpVertical, AutoMLVertical): # pylint: disable=too-many-instance-attributes
"""Text-NER task in AutoML NLP vertical.
NER - Named Entity Recognition.
NLP - Natural Language Processing.
@@ -20657,8 +31107,16 @@ class TextNer(NlpVertical, AutoMLVertical):
:ivar featurization_settings: Featurization inputs needed for AutoML job.
:vartype featurization_settings:
~azure.mgmt.machinelearningservices.models.NlpVerticalFeaturizationSettings
+ :ivar fixed_parameters: Model/training parameters that will remain constant throughout
+ training.
+ :vartype fixed_parameters: ~azure.mgmt.machinelearningservices.models.NlpFixedParameters
:ivar limit_settings: Execution constraints for AutoMLJob.
:vartype limit_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalLimitSettings
+ :ivar search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :vartype search_space: list[~azure.mgmt.machinelearningservices.models.NlpParameterSubspace]
+ :ivar sweep_settings: Settings for model sweeping and hyperparameter tuning.
+ :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.NlpSweepSettings
:ivar validation_data: Validation data inputs.
:vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
:ivar primary_metric: Primary metric for Text-NER task.
@@ -20681,7 +31139,10 @@ class TextNer(NlpVertical, AutoMLVertical):
"task_type": {"key": "taskType", "type": "str"},
"training_data": {"key": "trainingData", "type": "MLTableJobInput"},
"featurization_settings": {"key": "featurizationSettings", "type": "NlpVerticalFeaturizationSettings"},
+ "fixed_parameters": {"key": "fixedParameters", "type": "NlpFixedParameters"},
"limit_settings": {"key": "limitSettings", "type": "NlpVerticalLimitSettings"},
+ "search_space": {"key": "searchSpace", "type": "[NlpParameterSubspace]"},
+ "sweep_settings": {"key": "sweepSettings", "type": "NlpSweepSettings"},
"validation_data": {"key": "validationData", "type": "MLTableJobInput"},
"primary_metric": {"key": "primaryMetric", "type": "str"},
}
@@ -20693,7 +31154,10 @@ def __init__(
log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
target_column_name: Optional[str] = None,
featurization_settings: Optional["_models.NlpVerticalFeaturizationSettings"] = None,
+ fixed_parameters: Optional["_models.NlpFixedParameters"] = None,
limit_settings: Optional["_models.NlpVerticalLimitSettings"] = None,
+ search_space: Optional[List["_models.NlpParameterSubspace"]] = None,
+ sweep_settings: Optional["_models.NlpSweepSettings"] = None,
validation_data: Optional["_models.MLTableJobInput"] = None,
**kwargs: Any
) -> None:
@@ -20709,14 +31173,25 @@ def __init__(
:keyword featurization_settings: Featurization inputs needed for AutoML job.
:paramtype featurization_settings:
~azure.mgmt.machinelearningservices.models.NlpVerticalFeaturizationSettings
+ :keyword fixed_parameters: Model/training parameters that will remain constant throughout
+ training.
+ :paramtype fixed_parameters: ~azure.mgmt.machinelearningservices.models.NlpFixedParameters
:keyword limit_settings: Execution constraints for AutoMLJob.
:paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalLimitSettings
+ :keyword search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :paramtype search_space: list[~azure.mgmt.machinelearningservices.models.NlpParameterSubspace]
+ :keyword sweep_settings: Settings for model sweeping and hyperparameter tuning.
+ :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.NlpSweepSettings
:keyword validation_data: Validation data inputs.
:paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
"""
super().__init__(
featurization_settings=featurization_settings,
+ fixed_parameters=fixed_parameters,
limit_settings=limit_settings,
+ search_space=search_space,
+ sweep_settings=sweep_settings,
validation_data=validation_data,
log_verbosity=log_verbosity,
target_column_name=target_column_name,
@@ -20729,12 +31204,15 @@ def __init__(
self.training_data = training_data
self.primary_metric = None
self.featurization_settings = featurization_settings
+ self.fixed_parameters = fixed_parameters
self.limit_settings = limit_settings
+ self.search_space = search_space
+ self.sweep_settings = sweep_settings
self.validation_data = validation_data
class TmpfsOptions(_serialization.Model):
- """Describes the tmpfs options for the container.
+ """TmpfsOptions.
:ivar size: Mention the Tmpfs size.
:vartype size: int
@@ -20753,6 +31231,39 @@ def __init__(self, *, size: Optional[int] = None, **kwargs: Any) -> None:
self.size = size
+class TopNFeaturesByAttribution(MonitoringFeatureFilterBase):
+ """TopNFeaturesByAttribution.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar filter_type: [Required] Specifies the feature filter to leverage when selecting features
+ to calculate metrics over. Required. Known values are: "AllFeatures", "TopNByAttribution", and
+ "FeatureSubset".
+ :vartype filter_type: str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringFeatureFilterType
+ :ivar top: The number of top features to include.
+ :vartype top: int
+ """
+
+ _validation = {
+ "filter_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "filter_type": {"key": "filterType", "type": "str"},
+ "top": {"key": "top", "type": "int"},
+ }
+
+ def __init__(self, *, top: int = 10, **kwargs: Any) -> None:
+ """
+ :keyword top: The number of top features to include.
+ :paramtype top: int
+ """
+ super().__init__(**kwargs)
+ self.filter_type: str = "TopNByAttribution"
+ self.top = top
+
+
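A one-line illustrative sketch of the new monitoring feature filter; the discriminator is set by the constructor itself:

from azure.mgmt.machinelearningservices import models

# Restrict monitoring metrics to the five features with the highest attribution.
feature_filter = models.TopNFeaturesByAttribution(top=5)
assert feature_filter.filter_type == "TopNByAttribution"  # set automatically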
class TrialComponent(_serialization.Model):
"""Trial component definition.
@@ -20826,6 +31337,41 @@ def __init__(
self.resources = resources
+class TritonInferencingServer(InferencingServer):
+ """Triton inferencing server configurations.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar server_type: [Required] Inferencing server type for various targets. Required. Known
+ values are: "AzureMLOnline", "AzureMLBatch", "Triton", and "Custom".
+ :vartype server_type: str or ~azure.mgmt.machinelearningservices.models.InferencingServerType
+ :ivar inference_configuration: Inference configuration for Triton.
+ :vartype inference_configuration:
+ ~azure.mgmt.machinelearningservices.models.OnlineInferenceConfiguration
+ """
+
+ _validation = {
+ "server_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "server_type": {"key": "serverType", "type": "str"},
+ "inference_configuration": {"key": "inferenceConfiguration", "type": "OnlineInferenceConfiguration"},
+ }
+
+ def __init__(
+ self, *, inference_configuration: Optional["_models.OnlineInferenceConfiguration"] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword inference_configuration: Inference configuration for Triton.
+ :paramtype inference_configuration:
+ ~azure.mgmt.machinelearningservices.models.OnlineInferenceConfiguration
+ """
+ super().__init__(**kwargs)
+ self.server_type: str = "Triton"
+ self.inference_configuration = inference_configuration
+
+
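A minimal sketch of the new Triton server model (illustrative only; the optional OnlineInferenceConfiguration payload is omitted here):

from azure.mgmt.machinelearningservices import models

# Package-time inferencing server selection; the serverType discriminator is fixed to "Triton".
server = models.TritonInferencingServer()
assert server.server_type == "Triton"
assert server.inference_configuration is None  # may later be set to a models.OnlineInferenceConfiguration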
class TritonModelJobInput(AssetJobInput, JobInput):
"""TritonModelJobInput.
@@ -20890,7 +31436,14 @@ class TritonModelJobOutput(AssetJobOutput, JobOutput):
:ivar job_output_type: [Required] Specifies the type of job. Required. Known values are:
"uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and "triton_model".
:vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
- :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
+ :ivar asset_name: Output Asset Name.
+ :vartype asset_name: str
+ :ivar asset_version: Output Asset Version.
+ :vartype asset_version: str
+ :ivar auto_delete_setting: Auto delete setting of output data asset.
+ :vartype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:ivar uri: Output Asset URI.
:vartype uri: str
@@ -20903,6 +31456,9 @@ class TritonModelJobOutput(AssetJobOutput, JobOutput):
_attribute_map = {
"description": {"key": "description", "type": "str"},
"job_output_type": {"key": "jobOutputType", "type": "str"},
+ "asset_name": {"key": "assetName", "type": "str"},
+ "asset_version": {"key": "assetVersion", "type": "str"},
+ "auto_delete_setting": {"key": "autoDeleteSetting", "type": "AutoDeleteSetting"},
"mode": {"key": "mode", "type": "str"},
"uri": {"key": "uri", "type": "str"},
}
@@ -20911,6 +31467,9 @@ def __init__(
self,
*,
description: Optional[str] = None,
+ asset_name: Optional[str] = None,
+ asset_version: Optional[str] = None,
+ auto_delete_setting: Optional["_models.AutoDeleteSetting"] = None,
mode: Optional[Union[str, "_models.OutputDeliveryMode"]] = None,
uri: Optional[str] = None,
**kwargs: Any
@@ -20918,14 +31477,32 @@ def __init__(
"""
:keyword description: Description for the output.
:paramtype description: str
- :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
+ :keyword asset_name: Output Asset Name.
+ :paramtype asset_name: str
+ :keyword asset_version: Output Asset Version.
+ :paramtype asset_version: str
+ :keyword auto_delete_setting: Auto delete setting of output data asset.
+ :paramtype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
:paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:keyword uri: Output Asset URI.
:paramtype uri: str
"""
- super().__init__(mode=mode, uri=uri, description=description, **kwargs)
+ super().__init__(
+ asset_name=asset_name,
+ asset_version=asset_version,
+ auto_delete_setting=auto_delete_setting,
+ mode=mode,
+ uri=uri,
+ description=description,
+ **kwargs
+ )
self.description = description
self.job_output_type: str = "triton_model"
+ self.asset_name = asset_name
+ self.asset_version = asset_version
+ self.auto_delete_setting = auto_delete_setting
self.mode = mode
self.uri = uri
@@ -21067,9 +31644,13 @@ class UriFileDataVersion(DataVersionBaseProperties):
:vartype properties: dict[str, str]
:ivar tags: Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
- :ivar is_anonymous: If the name version are system generated (anonymous registration).
+ :ivar auto_delete_setting: Specifies the lifecycle setting of managed data asset.
+ :vartype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :ivar is_anonymous: If the name and version are system generated (anonymous registration). For
+ types where Stage is defined, when Stage is provided it will be used to populate IsAnonymous.
:vartype is_anonymous: bool
- :ivar is_archived: Is the asset archived?.
+ :ivar is_archived: Is the asset archived? For types where Stage is defined, when Stage is
+ provided it will be used to populate IsArchived.
:vartype is_archived: bool
:ivar data_type: [Required] Specifies the type of data. Required. Known values are: "uri_file",
"uri_folder", and "mltable".
@@ -21077,6 +31658,11 @@ class UriFileDataVersion(DataVersionBaseProperties):
:ivar data_uri: [Required] Uri of the data. Example:
https://go.microsoft.com/fwlink/?linkid=2202330. Required.
:vartype data_uri: str
+ :ivar intellectual_property: Intellectual Property details. Used if data is an Intellectual
+ Property.
+ :vartype intellectual_property: ~azure.mgmt.machinelearningservices.models.IntellectualProperty
+ :ivar stage: Stage in the data lifecycle assigned to this data asset.
+ :vartype stage: str
"""
_validation = {
@@ -21088,10 +31674,13 @@ class UriFileDataVersion(DataVersionBaseProperties):
"description": {"key": "description", "type": "str"},
"properties": {"key": "properties", "type": "{str}"},
"tags": {"key": "tags", "type": "{str}"},
+ "auto_delete_setting": {"key": "autoDeleteSetting", "type": "AutoDeleteSetting"},
"is_anonymous": {"key": "isAnonymous", "type": "bool"},
"is_archived": {"key": "isArchived", "type": "bool"},
"data_type": {"key": "dataType", "type": "str"},
"data_uri": {"key": "dataUri", "type": "str"},
+ "intellectual_property": {"key": "intellectualProperty", "type": "IntellectualProperty"},
+ "stage": {"key": "stage", "type": "str"},
}
def __init__(
@@ -21101,8 +31690,11 @@ def __init__(
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
+ auto_delete_setting: Optional["_models.AutoDeleteSetting"] = None,
is_anonymous: bool = False,
is_archived: bool = False,
+ intellectual_property: Optional["_models.IntellectualProperty"] = None,
+ stage: Optional[str] = None,
**kwargs: Any
) -> None:
"""
@@ -21112,21 +31704,34 @@ def __init__(
:paramtype properties: dict[str, str]
:keyword tags: Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
- :keyword is_anonymous: If the name version are system generated (anonymous registration).
+ :keyword auto_delete_setting: Specifies the lifecycle setting of managed data asset.
+ :paramtype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :keyword is_anonymous: If the name and version are system generated (anonymous registration). For
+ types where Stage is defined, when Stage is provided it will be used to populate IsAnonymous.
:paramtype is_anonymous: bool
- :keyword is_archived: Is the asset archived?.
+ :keyword is_archived: Is the asset archived? For types where Stage is defined, when Stage is
+ provided it will be used to populate IsArchived.
:paramtype is_archived: bool
:keyword data_uri: [Required] Uri of the data. Example:
https://go.microsoft.com/fwlink/?linkid=2202330. Required.
:paramtype data_uri: str
+ :keyword intellectual_property: Intellectual Property details. Used if data is an Intellectual
+ Property.
+ :paramtype intellectual_property:
+ ~azure.mgmt.machinelearningservices.models.IntellectualProperty
+ :keyword stage: Stage in the data lifecycle assigned to this data asset.
+ :paramtype stage: str
"""
super().__init__(
description=description,
properties=properties,
tags=tags,
+ auto_delete_setting=auto_delete_setting,
is_anonymous=is_anonymous,
is_archived=is_archived,
data_uri=data_uri,
+ intellectual_property=intellectual_property,
+ stage=stage,
**kwargs
)
self.data_type: str = "uri_file"
@@ -21196,7 +31801,14 @@ class UriFileJobOutput(AssetJobOutput, JobOutput):
:ivar job_output_type: [Required] Specifies the type of job. Required. Known values are:
"uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and "triton_model".
:vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
- :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
+ :ivar asset_name: Output Asset Name.
+ :vartype asset_name: str
+ :ivar asset_version: Output Asset Version.
+ :vartype asset_version: str
+ :ivar auto_delete_setting: Auto delete setting of output data asset.
+ :vartype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:ivar uri: Output Asset URI.
:vartype uri: str
@@ -21209,6 +31821,9 @@ class UriFileJobOutput(AssetJobOutput, JobOutput):
_attribute_map = {
"description": {"key": "description", "type": "str"},
"job_output_type": {"key": "jobOutputType", "type": "str"},
+ "asset_name": {"key": "assetName", "type": "str"},
+ "asset_version": {"key": "assetVersion", "type": "str"},
+ "auto_delete_setting": {"key": "autoDeleteSetting", "type": "AutoDeleteSetting"},
"mode": {"key": "mode", "type": "str"},
"uri": {"key": "uri", "type": "str"},
}
@@ -21217,6 +31832,9 @@ def __init__(
self,
*,
description: Optional[str] = None,
+ asset_name: Optional[str] = None,
+ asset_version: Optional[str] = None,
+ auto_delete_setting: Optional["_models.AutoDeleteSetting"] = None,
mode: Optional[Union[str, "_models.OutputDeliveryMode"]] = None,
uri: Optional[str] = None,
**kwargs: Any
@@ -21224,14 +31842,32 @@ def __init__(
"""
:keyword description: Description for the output.
:paramtype description: str
- :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
+ :keyword asset_name: Output Asset Name.
+ :paramtype asset_name: str
+ :keyword asset_version: Output Asset Version.
+ :paramtype asset_version: str
+ :keyword auto_delete_setting: Auto delete setting of output data asset.
+ :paramtype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
:paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:keyword uri: Output Asset URI.
:paramtype uri: str
"""
- super().__init__(mode=mode, uri=uri, description=description, **kwargs)
+ super().__init__(
+ asset_name=asset_name,
+ asset_version=asset_version,
+ auto_delete_setting=auto_delete_setting,
+ mode=mode,
+ uri=uri,
+ description=description,
+ **kwargs
+ )
self.description = description
self.job_output_type: str = "uri_file"
+ self.asset_name = asset_name
+ self.asset_version = asset_version
+ self.auto_delete_setting = auto_delete_setting
self.mode = mode
self.uri = uri
@@ -21247,9 +31883,13 @@ class UriFolderDataVersion(DataVersionBaseProperties):
:vartype properties: dict[str, str]
:ivar tags: Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
- :ivar is_anonymous: If the name version are system generated (anonymous registration).
+ :ivar auto_delete_setting: Specifies the lifecycle setting of managed data asset.
+ :vartype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :ivar is_anonymous: If the name and version are system generated (anonymous registration). For
+ types where Stage is defined, when Stage is provided it will be used to populate IsAnonymous.
:vartype is_anonymous: bool
- :ivar is_archived: Is the asset archived?.
+ :ivar is_archived: Is the asset archived? For types where Stage is defined, when Stage is
+ provided it will be used to populate IsArchived.
:vartype is_archived: bool
:ivar data_type: [Required] Specifies the type of data. Required. Known values are: "uri_file",
"uri_folder", and "mltable".
@@ -21257,6 +31897,11 @@ class UriFolderDataVersion(DataVersionBaseProperties):
:ivar data_uri: [Required] Uri of the data. Example:
https://go.microsoft.com/fwlink/?linkid=2202330. Required.
:vartype data_uri: str
+ :ivar intellectual_property: Intellectual Property details. Used if data is an Intellectual
+ Property.
+ :vartype intellectual_property: ~azure.mgmt.machinelearningservices.models.IntellectualProperty
+ :ivar stage: Stage in the data lifecycle assigned to this data asset.
+ :vartype stage: str
"""
_validation = {
@@ -21268,10 +31913,13 @@ class UriFolderDataVersion(DataVersionBaseProperties):
"description": {"key": "description", "type": "str"},
"properties": {"key": "properties", "type": "{str}"},
"tags": {"key": "tags", "type": "{str}"},
+ "auto_delete_setting": {"key": "autoDeleteSetting", "type": "AutoDeleteSetting"},
"is_anonymous": {"key": "isAnonymous", "type": "bool"},
"is_archived": {"key": "isArchived", "type": "bool"},
"data_type": {"key": "dataType", "type": "str"},
"data_uri": {"key": "dataUri", "type": "str"},
+ "intellectual_property": {"key": "intellectualProperty", "type": "IntellectualProperty"},
+ "stage": {"key": "stage", "type": "str"},
}
def __init__(
@@ -21281,8 +31929,11 @@ def __init__(
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
+ auto_delete_setting: Optional["_models.AutoDeleteSetting"] = None,
is_anonymous: bool = False,
is_archived: bool = False,
+ intellectual_property: Optional["_models.IntellectualProperty"] = None,
+ stage: Optional[str] = None,
**kwargs: Any
) -> None:
"""
@@ -21292,21 +31943,34 @@ def __init__(
:paramtype properties: dict[str, str]
:keyword tags: Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
- :keyword is_anonymous: If the name version are system generated (anonymous registration).
+ :keyword auto_delete_setting: Specifies the lifecycle setting of managed data asset.
+ :paramtype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :keyword is_anonymous: If the name and version are system generated (anonymous registration). For
+ types where Stage is defined, when Stage is provided it will be used to populate IsAnonymous.
:paramtype is_anonymous: bool
- :keyword is_archived: Is the asset archived?.
+ :keyword is_archived: Is the asset archived? For types where Stage is defined, when Stage is
+ provided it will be used to populate IsArchived.
:paramtype is_archived: bool
:keyword data_uri: [Required] Uri of the data. Example:
https://go.microsoft.com/fwlink/?linkid=2202330. Required.
:paramtype data_uri: str
+ :keyword intellectual_property: Intellectual Property details. Used if data is an Intellectual
+ Property.
+ :paramtype intellectual_property:
+ ~azure.mgmt.machinelearningservices.models.IntellectualProperty
+ :keyword stage: Stage in the data lifecycle assigned to this data asset.
+ :paramtype stage: str
"""
super().__init__(
description=description,
properties=properties,
tags=tags,
+ auto_delete_setting=auto_delete_setting,
is_anonymous=is_anonymous,
is_archived=is_archived,
data_uri=data_uri,
+ intellectual_property=intellectual_property,
+ stage=stage,
**kwargs
)
self.data_type: str = "uri_folder"
@@ -21376,7 +32040,14 @@ class UriFolderJobOutput(AssetJobOutput, JobOutput):
:ivar job_output_type: [Required] Specifies the type of job. Required. Known values are:
"uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and "triton_model".
:vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
- :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
+ :ivar asset_name: Output Asset Name.
+ :vartype asset_name: str
+ :ivar asset_version: Output Asset Version.
+ :vartype asset_version: str
+ :ivar auto_delete_setting: Auto delete setting of output data asset.
+ :vartype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:ivar uri: Output Asset URI.
:vartype uri: str
@@ -21389,6 +32060,9 @@ class UriFolderJobOutput(AssetJobOutput, JobOutput):
_attribute_map = {
"description": {"key": "description", "type": "str"},
"job_output_type": {"key": "jobOutputType", "type": "str"},
+ "asset_name": {"key": "assetName", "type": "str"},
+ "asset_version": {"key": "assetVersion", "type": "str"},
+ "auto_delete_setting": {"key": "autoDeleteSetting", "type": "AutoDeleteSetting"},
"mode": {"key": "mode", "type": "str"},
"uri": {"key": "uri", "type": "str"},
}
@@ -21397,6 +32071,9 @@ def __init__(
self,
*,
description: Optional[str] = None,
+ asset_name: Optional[str] = None,
+ asset_version: Optional[str] = None,
+ auto_delete_setting: Optional["_models.AutoDeleteSetting"] = None,
mode: Optional[Union[str, "_models.OutputDeliveryMode"]] = None,
uri: Optional[str] = None,
**kwargs: Any
@@ -21404,14 +32081,32 @@ def __init__(
"""
:keyword description: Description for the output.
:paramtype description: str
- :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
+ :keyword asset_name: Output Asset Name.
+ :paramtype asset_name: str
+ :keyword asset_version: Output Asset Version.
+ :paramtype asset_version: str
+ :keyword auto_delete_setting: Auto delete setting of output data asset.
+ :paramtype auto_delete_setting: ~azure.mgmt.machinelearningservices.models.AutoDeleteSetting
+ :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
:paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:keyword uri: Output Asset URI.
:paramtype uri: str
"""
- super().__init__(mode=mode, uri=uri, description=description, **kwargs)
+ super().__init__(
+ asset_name=asset_name,
+ asset_version=asset_version,
+ auto_delete_setting=auto_delete_setting,
+ mode=mode,
+ uri=uri,
+ description=description,
+ **kwargs
+ )
self.description = description
self.job_output_type: str = "uri_folder"
+ self.asset_name = asset_name
+ self.asset_version = asset_version
+ self.auto_delete_setting = auto_delete_setting
self.mode = mode
self.uri = uri
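The reshaped job outputs accept the new asset and lifecycle fields plus the "Direct" delivery mode; a sketch with hypothetical asset names and URI:

from azure.mgmt.machinelearningservices import models

# Folder output registered as a named asset and delivered in the new Direct mode.
output = models.UriFolderJobOutput(
    asset_name="sample-output-asset",   # hypothetical asset name
    asset_version="1",
    mode="Direct",                      # new value alongside ReadWriteMount and Upload
    uri="azureml://datastores/workspaceblobstore/paths/outputs/",  # hypothetical URI
)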
@@ -21640,20 +32335,30 @@ def __init__(self, **kwargs: Any) -> None:
class UsernamePasswordAuthTypeWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2):
"""UsernamePasswordAuthTypeWorkspaceConnectionProperties.
+ Variables are only populated by the server, and will be ignored when sending a request.
+
All required parameters must be populated in order to send to Azure.
:ivar auth_type: Authentication type of the connection target. Required. Known values are:
- "PAT", "ManagedIdentity", "UsernamePassword", "None", and "SAS".
+ "PAT", "ManagedIdentity", "UsernamePassword", "None", "SAS", "ServicePrincipal", "AccessKey",
+ "ApiKey", and "CustomKeys".
:vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
:ivar category: Category of the connection. Known values are: "PythonFeed",
- "ContainerRegistry", and "Git".
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI",
+ "CognitiveSearch", "CognitiveService", and "CustomKeys".
:vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :ivar created_by_workspace_arm_id: The arm id of the workspace which created this connection.
+ :vartype created_by_workspace_arm_id: str
+ :ivar expiry_time:
+ :vartype expiry_time: ~datetime.datetime
+ :ivar is_shared_to_all: Whether this connection will be shared to all the project workspaces
+ under the hub.
+ :vartype is_shared_to_all: bool
+ :ivar metadata: Any object.
+ :vartype metadata: JSON
:ivar target:
:vartype target: str
- :ivar value: Value details of the workspace connection.
- :vartype value: str
- :ivar value_format: format for the workspace connection value. "JSON"
- :vartype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
:ivar credentials:
:vartype credentials:
~azure.mgmt.machinelearningservices.models.WorkspaceConnectionUsernamePassword
@@ -21661,14 +32366,17 @@ class UsernamePasswordAuthTypeWorkspaceConnectionProperties(WorkspaceConnectionP
_validation = {
"auth_type": {"required": True},
+ "created_by_workspace_arm_id": {"readonly": True},
}
_attribute_map = {
"auth_type": {"key": "authType", "type": "str"},
"category": {"key": "category", "type": "str"},
+ "created_by_workspace_arm_id": {"key": "createdByWorkspaceArmId", "type": "str"},
+ "expiry_time": {"key": "expiryTime", "type": "iso-8601"},
+ "is_shared_to_all": {"key": "isSharedToAll", "type": "bool"},
+ "metadata": {"key": "metadata", "type": "object"},
"target": {"key": "target", "type": "str"},
- "value": {"key": "value", "type": "str"},
- "value_format": {"key": "valueFormat", "type": "str"},
"credentials": {"key": "credentials", "type": "WorkspaceConnectionUsernamePassword"},
}
@@ -21676,27 +32384,40 @@ def __init__(
self,
*,
category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
+ expiry_time: Optional[datetime.datetime] = None,
+ is_shared_to_all: Optional[bool] = None,
+ metadata: Optional[JSON] = None,
target: Optional[str] = None,
- value: Optional[str] = None,
- value_format: Optional[Union[str, "_models.ValueFormat"]] = None,
credentials: Optional["_models.WorkspaceConnectionUsernamePassword"] = None,
**kwargs: Any
) -> None:
"""
:keyword category: Category of the connection. Known values are: "PythonFeed",
- "ContainerRegistry", and "Git".
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI",
+ "CognitiveSearch", "CognitiveService", and "CustomKeys".
:paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :keyword expiry_time:
+ :paramtype expiry_time: ~datetime.datetime
+ :keyword is_shared_to_all: Whether this connection will be shared to all the project workspaces
+ under the hub.
+ :paramtype is_shared_to_all: bool
+ :keyword metadata: Any object.
+ :paramtype metadata: JSON
:keyword target:
:paramtype target: str
- :keyword value: Value details of the workspace connection.
- :paramtype value: str
- :keyword value_format: format for the workspace connection value. "JSON"
- :paramtype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
:keyword credentials:
:paramtype credentials:
~azure.mgmt.machinelearningservices.models.WorkspaceConnectionUsernamePassword
"""
- super().__init__(category=category, target=target, value=value, value_format=value_format, **kwargs)
+ super().__init__(
+ category=category,
+ expiry_time=expiry_time,
+ is_shared_to_all=is_shared_to_all,
+ metadata=metadata,
+ target=target,
+ **kwargs
+ )
self.auth_type: str = "UsernamePassword"
self.credentials = credentials
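With value/value_format dropped, the credentials object now carries the secret material; a sketch that assumes WorkspaceConnectionUsernamePassword takes username and password keywords:

from azure.mgmt.machinelearningservices import models

# Username/password connection properties; authType is fixed to "UsernamePassword".
conn_props = models.UsernamePasswordAuthTypeWorkspaceConnectionProperties(
    category="Git",
    target="https://example.com/repo.git",  # hypothetical target
    is_shared_to_all=False,
    credentials=models.WorkspaceConnectionUsernamePassword(  # assumed keyword names
        username="build-bot",
        password="placeholder-secret",
    ),
)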
@@ -22130,7 +32851,7 @@ def __init__(
class VolumeDefinition(_serialization.Model):
- """Describes the volume configuration for the container.
+ """VolumeDefinition.
:ivar type: Type of Volume Definition. Possible Values: bind,volume,tmpfs,npipe. Known values
are: "bind", "volume", "tmpfs", and "npipe".
@@ -22207,7 +32928,7 @@ def __init__(
class VolumeOptions(_serialization.Model):
- """Describes the volume options for the container.
+ """VolumeOptions.
:ivar nocopy: Indicate whether volume is nocopy.
:vartype nocopy: bool
@@ -22242,83 +32963,118 @@ class Workspace(Resource): # pylint: disable=too-many-instance-attributes
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
- :ivar identity: The identity of the resource.
+ :ivar identity: Managed service identity (system assigned and/or user assigned identities).
:vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
- :ivar location: Specifies the location of the resource.
+ :ivar kind:
+ :vartype kind: str
+ :ivar location:
:vartype location: str
- :ivar tags: Contains resource tags defined as key/value pairs.
- :vartype tags: dict[str, str]
- :ivar sku: The sku of the workspace.
+ :ivar sku: Optional. This field is required to be implemented by the RP because AML supports
+ more than one tier.
:vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
- :ivar workspace_id: The immutable id associated with this workspace.
- :vartype workspace_id: str
- :ivar description: The description of this workspace.
- :vartype description: str
- :ivar friendly_name: The friendly name for this workspace. This name in mutable.
- :vartype friendly_name: str
- :ivar key_vault: ARM id of the key vault associated with this workspace. This cannot be changed
- once the workspace has been created.
- :vartype key_vault: str
+ :ivar tags: Dictionary of :code:`<string>`.
+ :vartype tags: dict[str, str]
+ :ivar allow_public_access_when_behind_vnet: The flag to indicate whether to allow public access
+ when behind VNet.
+ :vartype allow_public_access_when_behind_vnet: bool
:ivar application_insights: ARM id of the application insights associated with this workspace.
:vartype application_insights: str
+ :ivar associated_workspaces:
+ :vartype associated_workspaces: list[str]
+ :ivar container_registries:
+ :vartype container_registries: list[str]
:ivar container_registry: ARM id of the container registry associated with this workspace.
:vartype container_registry: str
- :ivar storage_account: ARM id of the storage account associated with this workspace. This
- cannot be changed once the workspace has been created.
- :vartype storage_account: str
+ :ivar description: The description of this workspace.
+ :vartype description: str
:ivar discovery_url: Url for the discovery service to identify regional endpoints for machine
learning experimentation services.
:vartype discovery_url: str
- :ivar provisioning_state: The current deployment state of workspace resource. The
- provisioningState is to indicate states for resource provisioning. Known values are: "Unknown",
- "Updating", "Creating", "Deleting", "Succeeded", "Failed", and "Canceled".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.ProvisioningState
- :ivar encryption: The encryption settings of Azure ML workspace.
+ :ivar enable_data_isolation:
+ :vartype enable_data_isolation: bool
+ :ivar encryption:
:vartype encryption: ~azure.mgmt.machinelearningservices.models.EncryptionProperty
+ :ivar existing_workspaces:
+ :vartype existing_workspaces: list[str]
+ :ivar feature_store_settings: Settings for feature store type workspace.
+ :vartype feature_store_settings:
+ ~azure.mgmt.machinelearningservices.models.FeatureStoreSettings
+ :ivar friendly_name: The friendly name for this workspace. This name is mutable.
+ :vartype friendly_name: str
:ivar hbi_workspace: The flag to signal HBI data in the workspace and reduce diagnostic data
collected by the service.
:vartype hbi_workspace: bool
- :ivar service_provisioned_resource_group: The name of the managed resource group created by
- workspace RP in customer subscription if the workspace is CMK workspace.
- :vartype service_provisioned_resource_group: str
- :ivar private_link_count: Count of private connections in the workspace.
- :vartype private_link_count: int
+ :ivar hub_resource_id:
+ :vartype hub_resource_id: str
:ivar image_build_compute: The compute name for image build.
:vartype image_build_compute: str
- :ivar allow_public_access_when_behind_vnet: The flag to indicate whether to allow public access
- when behind VNet.
- :vartype allow_public_access_when_behind_vnet: bool
- :ivar public_network_access: Whether requests from Public Network are allowed. Known values
- are: "Enabled" and "Disabled".
- :vartype public_network_access: str or
- ~azure.mgmt.machinelearningservices.models.PublicNetworkAccess
+ :ivar key_vault: ARM id of the key vault associated with this workspace. This cannot be changed
+ once the workspace has been created.
+ :vartype key_vault: str
+ :ivar key_vaults:
+ :vartype key_vaults: list[str]
+ :ivar managed_network: Managed Network settings for a machine learning workspace.
+ :vartype managed_network: ~azure.mgmt.machinelearningservices.models.ManagedNetworkSettings
+ :ivar ml_flow_tracking_uri: The URI associated with this workspace that machine learning flow
+ must point at to set up tracking.
+ :vartype ml_flow_tracking_uri: str
+ :ivar notebook_info: The notebook info of Azure ML workspace.
+ :vartype notebook_info: ~azure.mgmt.machinelearningservices.models.NotebookResourceInfo
+ :ivar primary_user_assigned_identity: The user assigned identity resource id that represents
+ the workspace identity.
+ :vartype primary_user_assigned_identity: str
:ivar private_endpoint_connections: The list of private endpoint connections in the workspace.
:vartype private_endpoint_connections:
list[~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection]
+ :ivar private_link_count: Count of private connections in the workspace.
+ :vartype private_link_count: int
+ :ivar provisioning_state: The current deployment state of workspace resource. The
+ provisioningState is to indicate states for resource provisioning. Known values are: "Unknown",
+ "Updating", "Creating", "Deleting", "Succeeded", "Failed", and "Canceled".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.ProvisioningState
+ :ivar serverless_compute_custom_subnet: The resource ID of an existing virtual network subnet
+ in which serverless compute nodes should be deployed.
+ :vartype serverless_compute_custom_subnet: str
+ :ivar serverless_compute_no_public_ip: The flag to signal if serverless compute nodes deployed
+ in custom vNet would have no public IP addresses for a workspace with private endpoint.
+ :vartype serverless_compute_no_public_ip: bool
+ :ivar public_network_access: Whether requests from Public Network are allowed. Known values
+ are: "Enabled" and "Disabled".
+ :vartype public_network_access: str or
+ ~azure.mgmt.machinelearningservices.models.PublicNetworkAccessType
+ :ivar service_managed_resources_settings: The service managed resource settings.
+ :vartype service_managed_resources_settings:
+ ~azure.mgmt.machinelearningservices.models.ServiceManagedResourcesSettings
+ :ivar service_provisioned_resource_group: The name of the managed resource group created by
+ workspace RP in customer subscription if the workspace is CMK workspace.
+ :vartype service_provisioned_resource_group: str
:ivar shared_private_link_resources: The list of shared private link resources in this
workspace.
:vartype shared_private_link_resources:
list[~azure.mgmt.machinelearningservices.models.SharedPrivateLinkResource]
- :ivar notebook_info: The notebook info of Azure ML workspace.
- :vartype notebook_info: ~azure.mgmt.machinelearningservices.models.NotebookResourceInfo
- :ivar service_managed_resources_settings: The service managed resource settings.
- :vartype service_managed_resources_settings:
- ~azure.mgmt.machinelearningservices.models.ServiceManagedResourcesSettings
- :ivar primary_user_assigned_identity: The user assigned identity resource id that represents
- the workspace identity.
- :vartype primary_user_assigned_identity: str
- :ivar tenant_id: The tenant id associated with this workspace.
- :vartype tenant_id: str
+ :ivar soft_delete_retention_in_days: Retention time in days after the workspace gets soft deleted.
+ :vartype soft_delete_retention_in_days: int
+ :ivar storage_account: ARM id of the storage account associated with this workspace. This
+ cannot be changed once the workspace has been created.
+ :vartype storage_account: str
+ :ivar storage_accounts:
+ :vartype storage_accounts: list[str]
:ivar storage_hns_enabled: If the storage associated with the workspace has hierarchical
namespace(HNS) enabled.
:vartype storage_hns_enabled: bool
- :ivar ml_flow_tracking_uri: The URI associated with this workspace that machine learning flow
- must point at to set up tracking.
- :vartype ml_flow_tracking_uri: str
+ :ivar system_datastores_auth_mode: The auth mode used for accessing the system datastores of
+ the workspace.
+ :vartype system_datastores_auth_mode: str
+ :ivar tenant_id: The tenant id associated with this workspace.
+ :vartype tenant_id: str
:ivar v1_legacy_mode: Enabling v1_legacy_mode may prevent you from using features provided by
the v2 API.
:vartype v1_legacy_mode: bool
+ :ivar workspace_hub_config: WorkspaceHub's configuration object.
+ :vartype workspace_hub_config: ~azure.mgmt.machinelearningservices.models.WorkspaceHubConfig
+ :ivar workspace_id: The immutable id associated with this workspace.
+ :vartype workspace_id: str
"""
_validation = {
@@ -22326,15 +33082,15 @@ class Workspace(Resource): # pylint: disable=too-many-instance-attributes
"name": {"readonly": True},
"type": {"readonly": True},
"system_data": {"readonly": True},
- "workspace_id": {"readonly": True},
+ "ml_flow_tracking_uri": {"readonly": True},
+ "notebook_info": {"readonly": True},
+ "private_endpoint_connections": {"readonly": True},
+ "private_link_count": {"readonly": True},
"provisioning_state": {"readonly": True},
"service_provisioned_resource_group": {"readonly": True},
- "private_link_count": {"readonly": True},
- "private_endpoint_connections": {"readonly": True},
- "notebook_info": {"readonly": True},
- "tenant_id": {"readonly": True},
"storage_hns_enabled": {"readonly": True},
- "ml_flow_tracking_uri": {"readonly": True},
+ "tenant_id": {"readonly": True},
+ "workspace_id": {"readonly": True},
}
_attribute_map = {
@@ -22343,181 +33099,311 @@ class Workspace(Resource): # pylint: disable=too-many-instance-attributes
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
"identity": {"key": "identity", "type": "ManagedServiceIdentity"},
+ "kind": {"key": "kind", "type": "str"},
"location": {"key": "location", "type": "str"},
- "tags": {"key": "tags", "type": "{str}"},
"sku": {"key": "sku", "type": "Sku"},
- "workspace_id": {"key": "properties.workspaceId", "type": "str"},
- "description": {"key": "properties.description", "type": "str"},
- "friendly_name": {"key": "properties.friendlyName", "type": "str"},
- "key_vault": {"key": "properties.keyVault", "type": "str"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "allow_public_access_when_behind_vnet": {"key": "properties.allowPublicAccessWhenBehindVnet", "type": "bool"},
"application_insights": {"key": "properties.applicationInsights", "type": "str"},
+ "associated_workspaces": {"key": "properties.associatedWorkspaces", "type": "[str]"},
+ "container_registries": {"key": "properties.containerRegistries", "type": "[str]"},
"container_registry": {"key": "properties.containerRegistry", "type": "str"},
- "storage_account": {"key": "properties.storageAccount", "type": "str"},
+ "description": {"key": "properties.description", "type": "str"},
"discovery_url": {"key": "properties.discoveryUrl", "type": "str"},
- "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
+ "enable_data_isolation": {"key": "properties.enableDataIsolation", "type": "bool"},
"encryption": {"key": "properties.encryption", "type": "EncryptionProperty"},
+ "existing_workspaces": {"key": "properties.existingWorkspaces", "type": "[str]"},
+ "feature_store_settings": {"key": "properties.featureStoreSettings", "type": "FeatureStoreSettings"},
+ "friendly_name": {"key": "properties.friendlyName", "type": "str"},
"hbi_workspace": {"key": "properties.hbiWorkspace", "type": "bool"},
- "service_provisioned_resource_group": {"key": "properties.serviceProvisionedResourceGroup", "type": "str"},
- "private_link_count": {"key": "properties.privateLinkCount", "type": "int"},
+ "hub_resource_id": {"key": "properties.hubResourceId", "type": "str"},
"image_build_compute": {"key": "properties.imageBuildCompute", "type": "str"},
- "allow_public_access_when_behind_vnet": {"key": "properties.allowPublicAccessWhenBehindVnet", "type": "bool"},
- "public_network_access": {"key": "properties.publicNetworkAccess", "type": "str"},
+ "key_vault": {"key": "properties.keyVault", "type": "str"},
+ "key_vaults": {"key": "properties.keyVaults", "type": "[str]"},
+ "managed_network": {"key": "properties.managedNetwork", "type": "ManagedNetworkSettings"},
+ "ml_flow_tracking_uri": {"key": "properties.mlFlowTrackingUri", "type": "str"},
+ "notebook_info": {"key": "properties.notebookInfo", "type": "NotebookResourceInfo"},
+ "primary_user_assigned_identity": {"key": "properties.primaryUserAssignedIdentity", "type": "str"},
"private_endpoint_connections": {
"key": "properties.privateEndpointConnections",
"type": "[PrivateEndpointConnection]",
},
- "shared_private_link_resources": {
- "key": "properties.sharedPrivateLinkResources",
- "type": "[SharedPrivateLinkResource]",
- },
- "notebook_info": {"key": "properties.notebookInfo", "type": "NotebookResourceInfo"},
+ "private_link_count": {"key": "properties.privateLinkCount", "type": "int"},
+ "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
+ "serverless_compute_custom_subnet": {"key": "properties.serverlessComputeCustomSubnet", "type": "str"},
+ "serverless_compute_no_public_ip": {"key": "properties.serverlessComputeNoPublicIP", "type": "bool"},
+ "public_network_access": {"key": "properties.publicNetworkAccess", "type": "str"},
"service_managed_resources_settings": {
"key": "properties.serviceManagedResourcesSettings",
"type": "ServiceManagedResourcesSettings",
},
- "primary_user_assigned_identity": {"key": "properties.primaryUserAssignedIdentity", "type": "str"},
- "tenant_id": {"key": "properties.tenantId", "type": "str"},
+ "service_provisioned_resource_group": {"key": "properties.serviceProvisionedResourceGroup", "type": "str"},
+ "shared_private_link_resources": {
+ "key": "properties.sharedPrivateLinkResources",
+ "type": "[SharedPrivateLinkResource]",
+ },
+ "soft_delete_retention_in_days": {"key": "properties.softDeleteRetentionInDays", "type": "int"},
+ "storage_account": {"key": "properties.storageAccount", "type": "str"},
+ "storage_accounts": {"key": "properties.storageAccounts", "type": "[str]"},
"storage_hns_enabled": {"key": "properties.storageHnsEnabled", "type": "bool"},
- "ml_flow_tracking_uri": {"key": "properties.mlFlowTrackingUri", "type": "str"},
+ "system_datastores_auth_mode": {"key": "properties.systemDatastoresAuthMode", "type": "str"},
+ "tenant_id": {"key": "properties.tenantId", "type": "str"},
"v1_legacy_mode": {"key": "properties.v1LegacyMode", "type": "bool"},
+ "workspace_hub_config": {"key": "properties.workspaceHubConfig", "type": "WorkspaceHubConfig"},
+ "workspace_id": {"key": "properties.workspaceId", "type": "str"},
}
def __init__( # pylint: disable=too-many-locals
self,
*,
identity: Optional["_models.ManagedServiceIdentity"] = None,
+ kind: Optional[str] = None,
location: Optional[str] = None,
- tags: Optional[Dict[str, str]] = None,
sku: Optional["_models.Sku"] = None,
- description: Optional[str] = None,
- friendly_name: Optional[str] = None,
- key_vault: Optional[str] = None,
+ tags: Optional[Dict[str, str]] = None,
+ allow_public_access_when_behind_vnet: Optional[bool] = None,
application_insights: Optional[str] = None,
+ associated_workspaces: Optional[List[str]] = None,
+ container_registries: Optional[List[str]] = None,
container_registry: Optional[str] = None,
- storage_account: Optional[str] = None,
+ description: Optional[str] = None,
discovery_url: Optional[str] = None,
+ enable_data_isolation: Optional[bool] = None,
encryption: Optional["_models.EncryptionProperty"] = None,
- hbi_workspace: bool = False,
+ existing_workspaces: Optional[List[str]] = None,
+ feature_store_settings: Optional["_models.FeatureStoreSettings"] = None,
+ friendly_name: Optional[str] = None,
+ hbi_workspace: Optional[bool] = None,
+ hub_resource_id: Optional[str] = None,
image_build_compute: Optional[str] = None,
- allow_public_access_when_behind_vnet: bool = False,
- public_network_access: Optional[Union[str, "_models.PublicNetworkAccess"]] = None,
- shared_private_link_resources: Optional[List["_models.SharedPrivateLinkResource"]] = None,
- service_managed_resources_settings: Optional["_models.ServiceManagedResourcesSettings"] = None,
+ key_vault: Optional[str] = None,
+ key_vaults: Optional[List[str]] = None,
+ managed_network: Optional["_models.ManagedNetworkSettings"] = None,
primary_user_assigned_identity: Optional[str] = None,
- v1_legacy_mode: bool = False,
+ serverless_compute_custom_subnet: Optional[str] = None,
+ serverless_compute_no_public_ip: Optional[bool] = None,
+ public_network_access: Optional[Union[str, "_models.PublicNetworkAccessType"]] = None,
+ service_managed_resources_settings: Optional["_models.ServiceManagedResourcesSettings"] = None,
+ shared_private_link_resources: Optional[List["_models.SharedPrivateLinkResource"]] = None,
+ soft_delete_retention_in_days: Optional[int] = None,
+ storage_account: Optional[str] = None,
+ storage_accounts: Optional[List[str]] = None,
+ system_datastores_auth_mode: Optional[str] = None,
+ v1_legacy_mode: Optional[bool] = None,
+ workspace_hub_config: Optional["_models.WorkspaceHubConfig"] = None,
**kwargs: Any
) -> None:
"""
- :keyword identity: The identity of the resource.
+ :keyword identity: Managed service identity (system assigned and/or user assigned identities).
:paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
- :keyword location: Specifies the location of the resource.
+ :keyword kind:
+ :paramtype kind: str
+ :keyword location:
:paramtype location: str
- :keyword tags: Contains resource tags defined as key/value pairs.
- :paramtype tags: dict[str, str]
- :keyword sku: The sku of the workspace.
+ :keyword sku: Optional. This field is required to be implemented by the RP because AML is
+ supporting more than one tier.
:paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
- :keyword description: The description of this workspace.
- :paramtype description: str
- :keyword friendly_name: The friendly name for this workspace. This name in mutable.
- :paramtype friendly_name: str
- :keyword key_vault: ARM id of the key vault associated with this workspace. This cannot be
- changed once the workspace has been created.
- :paramtype key_vault: str
+ :keyword tags: Dictionary of resource tags.
+ :paramtype tags: dict[str, str]
+ :keyword allow_public_access_when_behind_vnet: The flag to indicate whether to allow public
+ access when behind VNet.
+ :paramtype allow_public_access_when_behind_vnet: bool
:keyword application_insights: ARM id of the application insights associated with this
workspace.
:paramtype application_insights: str
+ :keyword associated_workspaces:
+ :paramtype associated_workspaces: list[str]
+ :keyword container_registries:
+ :paramtype container_registries: list[str]
:keyword container_registry: ARM id of the container registry associated with this workspace.
:paramtype container_registry: str
- :keyword storage_account: ARM id of the storage account associated with this workspace. This
- cannot be changed once the workspace has been created.
- :paramtype storage_account: str
+ :keyword description: The description of this workspace.
+ :paramtype description: str
:keyword discovery_url: Url for the discovery service to identify regional endpoints for
machine learning experimentation services.
:paramtype discovery_url: str
- :keyword encryption: The encryption settings of Azure ML workspace.
+ :keyword enable_data_isolation:
+ :paramtype enable_data_isolation: bool
+ :keyword encryption:
:paramtype encryption: ~azure.mgmt.machinelearningservices.models.EncryptionProperty
+ :keyword existing_workspaces:
+ :paramtype existing_workspaces: list[str]
+ :keyword feature_store_settings: Settings for feature store type workspace.
+ :paramtype feature_store_settings:
+ ~azure.mgmt.machinelearningservices.models.FeatureStoreSettings
+ :keyword friendly_name: The friendly name for this workspace. This name is mutable.
+ :paramtype friendly_name: str
:keyword hbi_workspace: The flag to signal HBI data in the workspace and reduce diagnostic data
collected by the service.
:paramtype hbi_workspace: bool
+ :keyword hub_resource_id:
+ :paramtype hub_resource_id: str
:keyword image_build_compute: The compute name for image build.
:paramtype image_build_compute: str
- :keyword allow_public_access_when_behind_vnet: The flag to indicate whether to allow public
- access when behind VNet.
- :paramtype allow_public_access_when_behind_vnet: bool
+ :keyword key_vault: ARM id of the key vault associated with this workspace. This cannot be
+ changed once the workspace has been created.
+ :paramtype key_vault: str
+ :keyword key_vaults:
+ :paramtype key_vaults: list[str]
+ :keyword managed_network: Managed Network settings for a machine learning workspace.
+ :paramtype managed_network: ~azure.mgmt.machinelearningservices.models.ManagedNetworkSettings
+ :keyword primary_user_assigned_identity: The user assigned identity resource id that represents
+ the workspace identity.
+ :paramtype primary_user_assigned_identity: str
+ :keyword serverless_compute_custom_subnet: The resource ID of an existing virtual network
+ subnet in which serverless compute nodes should be deployed.
+ :paramtype serverless_compute_custom_subnet: str
+ :keyword serverless_compute_no_public_ip: The flag to signal if serverless compute nodes
+ deployed in custom vNet would have no public IP addresses for a workspace with private
+ endpoint.
+ :paramtype serverless_compute_no_public_ip: bool
:keyword public_network_access: Whether requests from Public Network are allowed. Known values
are: "Enabled" and "Disabled".
:paramtype public_network_access: str or
- ~azure.mgmt.machinelearningservices.models.PublicNetworkAccess
+ ~azure.mgmt.machinelearningservices.models.PublicNetworkAccessType
+ :keyword service_managed_resources_settings: The service managed resource settings.
+ :paramtype service_managed_resources_settings:
+ ~azure.mgmt.machinelearningservices.models.ServiceManagedResourcesSettings
:keyword shared_private_link_resources: The list of shared private link resources in this
workspace.
:paramtype shared_private_link_resources:
list[~azure.mgmt.machinelearningservices.models.SharedPrivateLinkResource]
- :keyword service_managed_resources_settings: The service managed resource settings.
- :paramtype service_managed_resources_settings:
- ~azure.mgmt.machinelearningservices.models.ServiceManagedResourcesSettings
- :keyword primary_user_assigned_identity: The user assigned identity resource id that represents
- the workspace identity.
- :paramtype primary_user_assigned_identity: str
+ :keyword soft_delete_retention_in_days: Retention time in days after the workspace gets soft
+ deleted.
+ :paramtype soft_delete_retention_in_days: int
+ :keyword storage_account: ARM id of the storage account associated with this workspace. This
+ cannot be changed once the workspace has been created.
+ :paramtype storage_account: str
+ :keyword storage_accounts:
+ :paramtype storage_accounts: list[str]
+ :keyword system_datastores_auth_mode: The auth mode used for accessing the system datastores of
+ the workspace.
+ :paramtype system_datastores_auth_mode: str
:keyword v1_legacy_mode: Enabling v1_legacy_mode may prevent you from using features provided
by the v2 API.
:paramtype v1_legacy_mode: bool
+ :keyword workspace_hub_config: WorkspaceHub's configuration object.
+ :paramtype workspace_hub_config: ~azure.mgmt.machinelearningservices.models.WorkspaceHubConfig
"""
super().__init__(**kwargs)
self.identity = identity
+ self.kind = kind
self.location = location
- self.tags = tags
self.sku = sku
- self.workspace_id = None
- self.description = description
- self.friendly_name = friendly_name
- self.key_vault = key_vault
+ self.tags = tags
+ self.allow_public_access_when_behind_vnet = allow_public_access_when_behind_vnet
self.application_insights = application_insights
+ self.associated_workspaces = associated_workspaces
+ self.container_registries = container_registries
self.container_registry = container_registry
- self.storage_account = storage_account
+ self.description = description
self.discovery_url = discovery_url
- self.provisioning_state = None
+ self.enable_data_isolation = enable_data_isolation
self.encryption = encryption
+ self.existing_workspaces = existing_workspaces
+ self.feature_store_settings = feature_store_settings
+ self.friendly_name = friendly_name
self.hbi_workspace = hbi_workspace
- self.service_provisioned_resource_group = None
- self.private_link_count = None
+ self.hub_resource_id = hub_resource_id
self.image_build_compute = image_build_compute
- self.allow_public_access_when_behind_vnet = allow_public_access_when_behind_vnet
- self.public_network_access = public_network_access
- self.private_endpoint_connections = None
- self.shared_private_link_resources = shared_private_link_resources
+ self.key_vault = key_vault
+ self.key_vaults = key_vaults
+ self.managed_network = managed_network
+ self.ml_flow_tracking_uri = None
self.notebook_info = None
- self.service_managed_resources_settings = service_managed_resources_settings
self.primary_user_assigned_identity = primary_user_assigned_identity
- self.tenant_id = None
+ self.private_endpoint_connections = None
+ self.private_link_count = None
+ self.provisioning_state = None
+ self.serverless_compute_custom_subnet = serverless_compute_custom_subnet
+ self.serverless_compute_no_public_ip = serverless_compute_no_public_ip
+ self.public_network_access = public_network_access
+ self.service_managed_resources_settings = service_managed_resources_settings
+ self.service_provisioned_resource_group = None
+ self.shared_private_link_resources = shared_private_link_resources
+ self.soft_delete_retention_in_days = soft_delete_retention_in_days
+ self.storage_account = storage_account
+ self.storage_accounts = storage_accounts
self.storage_hns_enabled = None
- self.ml_flow_tracking_uri = None
+ self.system_datastores_auth_mode = system_datastores_auth_mode
+ self.tenant_id = None
self.v1_legacy_mode = v1_legacy_mode
+ self.workspace_hub_config = workspace_hub_config
+ self.workspace_id = None
+
+
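
For orientation, a minimal sketch (not part of the generated code) of building the regenerated Workspace model with a few of the properties this preview adds. Every keyword used below appears in the __init__ signature above; the ManagedNetworkSettings isolation_mode value, the placeholder IDs, and the begin_create_or_update call on the workspaces operation group are assumptions to verify against the generated operations.

    from azure.identity import DefaultAzureCredential
    from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
    from azure.mgmt.machinelearningservices.models import ManagedNetworkSettings, Workspace

    # Placeholder identifiers; substitute real values.
    client = MachineLearningServicesMgmtClient(
        DefaultAzureCredential(), "00000000-0000-0000-0000-000000000000"
    )

    workspace = Workspace(
        location="eastus",
        friendly_name="demo workspace",
        # Preview-only properties from the regenerated model; values are illustrative.
        managed_network=ManagedNetworkSettings(isolation_mode="AllowInternetOutbound"),
        serverless_compute_custom_subnet=(
            "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg"
            "/providers/Microsoft.Network/virtualNetworks/my-vnet/subnets/my-subnet"
        ),
        system_datastores_auth_mode="identity",
        soft_delete_retention_in_days=14,
    )

    # Assumed LRO call; check the exact signature in WorkspacesOperations.
    poller = client.workspaces.begin_create_or_update("my-rg", "my-workspace", workspace)
    print(poller.result().provisioning_state)
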
+class WorkspaceConnectionAccessKey(_serialization.Model):
+ """WorkspaceConnectionAccessKey.
+
+ :ivar access_key_id:
+ :vartype access_key_id: str
+ :ivar secret_access_key:
+ :vartype secret_access_key: str
+ """
+
+ _attribute_map = {
+ "access_key_id": {"key": "accessKeyId", "type": "str"},
+ "secret_access_key": {"key": "secretAccessKey", "type": "str"},
+ }
+
+ def __init__(
+ self, *, access_key_id: Optional[str] = None, secret_access_key: Optional[str] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword access_key_id:
+ :paramtype access_key_id: str
+ :keyword secret_access_key:
+ :paramtype secret_access_key: str
+ """
+ super().__init__(**kwargs)
+ self.access_key_id = access_key_id
+ self.secret_access_key = secret_access_key
+
+
+class WorkspaceConnectionApiKey(_serialization.Model):
+ """Api key object for workspace connection credential.
+
+ :ivar key:
+ :vartype key: str
+ """
+
+ _attribute_map = {
+ "key": {"key": "key", "type": "str"},
+ }
+
+ def __init__(self, *, key: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword key:
+ :paramtype key: str
+ """
+ super().__init__(**kwargs)
+ self.key = key
class WorkspaceConnectionManagedIdentity(_serialization.Model):
"""WorkspaceConnectionManagedIdentity.
- :ivar resource_id:
- :vartype resource_id: str
:ivar client_id:
:vartype client_id: str
+ :ivar resource_id:
+ :vartype resource_id: str
"""
_attribute_map = {
- "resource_id": {"key": "resourceId", "type": "str"},
"client_id": {"key": "clientId", "type": "str"},
+ "resource_id": {"key": "resourceId", "type": "str"},
}
- def __init__(self, *, resource_id: Optional[str] = None, client_id: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(self, *, client_id: Optional[str] = None, resource_id: Optional[str] = None, **kwargs: Any) -> None:
"""
- :keyword resource_id:
- :paramtype resource_id: str
:keyword client_id:
:paramtype client_id: str
+ :keyword resource_id:
+ :paramtype resource_id: str
"""
super().__init__(**kwargs)
- self.resource_id = resource_id
self.client_id = client_id
+ self.resource_id = resource_id
class WorkspaceConnectionPersonalAccessToken(_serialization.Model):
@@ -22591,35 +33477,74 @@ def __init__(self, *, properties: "_models.WorkspaceConnectionPropertiesV2", **k
class WorkspaceConnectionPropertiesV2BasicResourceArmPaginatedResult(_serialization.Model):
"""WorkspaceConnectionPropertiesV2BasicResourceArmPaginatedResult.
- Variables are only populated by the server, and will be ignored when sending a request.
-
+ :ivar next_link:
+ :vartype next_link: str
:ivar value:
:vartype value:
list[~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource]
- :ivar next_link:
- :vartype next_link: str
"""
- _validation = {
- "next_link": {"readonly": True},
- }
-
_attribute_map = {
- "value": {"key": "value", "type": "[WorkspaceConnectionPropertiesV2BasicResource]"},
"next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[WorkspaceConnectionPropertiesV2BasicResource]"},
}
def __init__(
- self, *, value: Optional[List["_models.WorkspaceConnectionPropertiesV2BasicResource"]] = None, **kwargs: Any
+ self,
+ *,
+ next_link: Optional[str] = None,
+ value: Optional[List["_models.WorkspaceConnectionPropertiesV2BasicResource"]] = None,
+ **kwargs: Any
) -> None:
"""
+ :keyword next_link:
+ :paramtype next_link: str
:keyword value:
:paramtype value:
list[~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource]
"""
super().__init__(**kwargs)
+ self.next_link = next_link
self.value = value
- self.next_link = None
+
+
+class WorkspaceConnectionServicePrincipal(_serialization.Model):
+ """WorkspaceConnectionServicePrincipal.
+
+ :ivar client_id:
+ :vartype client_id: str
+ :ivar client_secret:
+ :vartype client_secret: str
+ :ivar tenant_id:
+ :vartype tenant_id: str
+ """
+
+ _attribute_map = {
+ "client_id": {"key": "clientId", "type": "str"},
+ "client_secret": {"key": "clientSecret", "type": "str"},
+ "tenant_id": {"key": "tenantId", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ client_id: Optional[str] = None,
+ client_secret: Optional[str] = None,
+ tenant_id: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword client_id:
+ :paramtype client_id: str
+ :keyword client_secret:
+ :paramtype client_secret: str
+ :keyword tenant_id:
+ :paramtype tenant_id: str
+ """
+ super().__init__(**kwargs)
+ self.client_id = client_id
+ self.client_secret = client_secret
+ self.tenant_id = tenant_id
class WorkspaceConnectionSharedAccessSignature(_serialization.Model):
@@ -22642,166 +33567,320 @@ def __init__(self, *, sas: Optional[str] = None, **kwargs: Any) -> None:
self.sas = sas
+class WorkspaceConnectionUpdateParameter(_serialization.Model):
+ """The properties that the machine learning workspace connection will be updated with.
+
+ :ivar properties: The properties that the machine learning workspace connection will be updated
+ with.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2
+ """
+
+ _attribute_map = {
+ "properties": {"key": "properties", "type": "WorkspaceConnectionPropertiesV2"},
+ }
+
+ def __init__(
+ self, *, properties: Optional["_models.WorkspaceConnectionPropertiesV2"] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword properties: The properties that the machine learning workspace connection will be
+ updated with.
+ :paramtype properties:
+ ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2
+ """
+ super().__init__(**kwargs)
+ self.properties = properties
+
+
class WorkspaceConnectionUsernamePassword(_serialization.Model):
"""WorkspaceConnectionUsernamePassword.
- :ivar username:
- :vartype username: str
:ivar password:
:vartype password: str
+ :ivar username:
+ :vartype username: str
"""
_attribute_map = {
- "username": {"key": "username", "type": "str"},
"password": {"key": "password", "type": "str"},
+ "username": {"key": "username", "type": "str"},
}
- def __init__(self, *, username: Optional[str] = None, password: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(self, *, password: Optional[str] = None, username: Optional[str] = None, **kwargs: Any) -> None:
"""
- :keyword username:
- :paramtype username: str
:keyword password:
:paramtype password: str
+ :keyword username:
+ :paramtype username: str
"""
super().__init__(**kwargs)
- self.username = username
self.password = password
+ self.username = username
+
+
+class WorkspaceHubConfig(_serialization.Model):
+ """WorkspaceHub's configuration object.
+
+ :ivar additional_workspace_storage_accounts:
+ :vartype additional_workspace_storage_accounts: list[str]
+ :ivar default_workspace_resource_group:
+ :vartype default_workspace_resource_group: str
+ """
+
+ _attribute_map = {
+ "additional_workspace_storage_accounts": {"key": "additionalWorkspaceStorageAccounts", "type": "[str]"},
+ "default_workspace_resource_group": {"key": "defaultWorkspaceResourceGroup", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ additional_workspace_storage_accounts: Optional[List[str]] = None,
+ default_workspace_resource_group: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword additional_workspace_storage_accounts:
+ :paramtype additional_workspace_storage_accounts: list[str]
+ :keyword default_workspace_resource_group:
+ :paramtype default_workspace_resource_group: str
+ """
+ super().__init__(**kwargs)
+ self.additional_workspace_storage_accounts = additional_workspace_storage_accounts
+ self.default_workspace_resource_group = default_workspace_resource_group
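
WorkspaceHubConfig, together with the new kind and hub_resource_id properties on Workspace, reflects the hub/project split in this preview. A hedged sketch: the kind values "Hub" and "Project" are assumptions (the diff types kind only as str), and the resource IDs are placeholders.

    from azure.mgmt.machinelearningservices.models import Workspace, WorkspaceHubConfig

    # A hub workspace carrying the hub-level configuration object.
    hub = Workspace(
        location="eastus",
        kind="Hub",  # assumed value; not constrained anywhere in this diff
        workspace_hub_config=WorkspaceHubConfig(
            default_workspace_resource_group=(
                "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg"
            ),
            additional_workspace_storage_accounts=[],
        ),
    )

    # A project workspace pointing back at the hub via hub_resource_id.
    project = Workspace(
        location="eastus",
        kind="Project",  # assumed value
        hub_resource_id=(
            "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg"
            "/providers/Microsoft.MachineLearningServices/workspaces/my-hub"
        ),
    )
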
class WorkspaceListResult(_serialization.Model):
"""The result of a request to list machine learning workspaces.
+ :ivar next_link: The link to the next page constructed using the continuationToken. If null,
+ there are no additional pages.
+ :vartype next_link: str
:ivar value: The list of machine learning workspaces. Since this list may be incomplete, the
nextLink field should be used to request the next list of machine learning workspaces.
:vartype value: list[~azure.mgmt.machinelearningservices.models.Workspace]
- :ivar next_link: The URI that can be used to request the next list of machine learning
- workspaces.
- :vartype next_link: str
"""
_attribute_map = {
- "value": {"key": "value", "type": "[Workspace]"},
"next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[Workspace]"},
}
def __init__(
- self, *, value: Optional[List["_models.Workspace"]] = None, next_link: Optional[str] = None, **kwargs: Any
+ self, *, next_link: Optional[str] = None, value: Optional[List["_models.Workspace"]] = None, **kwargs: Any
) -> None:
"""
+ :keyword next_link: The link to the next page constructed using the continuationToken. If
+ null, there are no additional pages.
+ :paramtype next_link: str
:keyword value: The list of machine learning workspaces. Since this list may be incomplete, the
nextLink field should be used to request the next list of machine learning workspaces.
:paramtype value: list[~azure.mgmt.machinelearningservices.models.Workspace]
- :keyword next_link: The URI that can be used to request the next list of machine learning
- workspaces.
- :paramtype next_link: str
"""
super().__init__(**kwargs)
- self.value = value
self.next_link = next_link
+ self.value = value
+
+
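
next_link on WorkspaceListResult is now a settable field rather than server-populated, but callers normally never touch it: the list operations wrap continuation handling in ItemPaged. A one-line sketch, assuming the workspaces operation group still exposes list_by_resource_group and reusing the client from the earlier sketch.

    # Paging follows nextLink transparently.
    for ws in client.workspaces.list_by_resource_group("my-rg"):
        print(ws.name, ws.kind, ws.provisioning_state)
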
+class WorkspacePrivateEndpointResource(_serialization.Model):
+ """The Private Endpoint resource.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar id: e.g.
+ /subscriptions/{networkSubscriptionId}/resourceGroups/{rgName}/providers/Microsoft.Network/privateEndpoints/{privateEndpointName}.
+ :vartype id: str
+ :ivar subnet_arm_id: The subnetId that the private endpoint is connected to.
+ :vartype subnet_arm_id: str
+ """
+
+ _validation = {
+ "id": {"readonly": True},
+ "subnet_arm_id": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "id": {"key": "id", "type": "str"},
+ "subnet_arm_id": {"key": "subnetArmId", "type": "str"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.id = None
+ self.subnet_arm_id = None
class WorkspaceUpdateParameters(_serialization.Model): # pylint: disable=too-many-instance-attributes
"""The parameters for updating a machine learning workspace.
+ :ivar identity: Managed service identity (system assigned and/or user assigned identities).
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :ivar sku: Optional. This field is required to be implemented by the RP because AML is
+ supporting more than one tier.
+ :vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
:ivar tags: The resource tags for the machine learning workspace.
:vartype tags: dict[str, str]
- :ivar sku: The sku of the workspace.
- :vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
- :ivar identity: The identity of the resource.
- :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :ivar application_insights: ARM id of the application insights associated with this workspace.
+ :vartype application_insights: str
+ :ivar container_registry: ARM id of the container registry associated with this workspace.
+ :vartype container_registry: str
:ivar description: The description of this workspace.
:vartype description: str
- :ivar friendly_name: The friendly name for this workspace.
+ :ivar enable_data_isolation:
+ :vartype enable_data_isolation: bool
+ :ivar encryption:
+ :vartype encryption: ~azure.mgmt.machinelearningservices.models.EncryptionUpdateProperties
+ :ivar feature_store_settings: Settings for feature store type workspace.
+ :vartype feature_store_settings:
+ ~azure.mgmt.machinelearningservices.models.FeatureStoreSettings
+ :ivar friendly_name: The friendly name for this workspace. This name is mutable.
:vartype friendly_name: str
:ivar image_build_compute: The compute name for image build.
:vartype image_build_compute: str
- :ivar service_managed_resources_settings: The service managed resource settings.
- :vartype service_managed_resources_settings:
- ~azure.mgmt.machinelearningservices.models.ServiceManagedResourcesSettings
+ :ivar managed_network: Managed Network settings for a machine learning workspace.
+ :vartype managed_network: ~azure.mgmt.machinelearningservices.models.ManagedNetworkSettings
:ivar primary_user_assigned_identity: The user assigned identity resource id that represents
the workspace identity.
:vartype primary_user_assigned_identity: str
:ivar public_network_access: Whether requests from Public Network are allowed. Known values
are: "Enabled" and "Disabled".
:vartype public_network_access: str or
- ~azure.mgmt.machinelearningservices.models.PublicNetworkAccess
- :ivar application_insights: ARM id of the application insights associated with this workspace.
- :vartype application_insights: str
- :ivar container_registry: ARM id of the container registry associated with this workspace.
- :vartype container_registry: str
+ ~azure.mgmt.machinelearningservices.models.PublicNetworkAccessType
+ :ivar serverless_compute_custom_subnet: The resource ID of an existing virtual network subnet
+ in which serverless compute nodes should be deployed.
+ :vartype serverless_compute_custom_subnet: str
+ :ivar serverless_compute_no_public_ip: The flag to signal if serverless compute nodes deployed
+ in custom vNet would have no public IP addresses for a workspace with private endpoint.
+ :vartype serverless_compute_no_public_ip: bool
+ :ivar service_managed_resources_settings: The service managed resource settings.
+ :vartype service_managed_resources_settings:
+ ~azure.mgmt.machinelearningservices.models.ServiceManagedResourcesSettings
+ :ivar soft_delete_retention_in_days: Retention time in days after the workspace gets soft deleted.
+ :vartype soft_delete_retention_in_days: int
+ :ivar v1_legacy_mode: Enabling v1_legacy_mode may prevent you from using features provided by
+ the v2 API.
+ :vartype v1_legacy_mode: bool
"""
_attribute_map = {
- "tags": {"key": "tags", "type": "{str}"},
- "sku": {"key": "sku", "type": "Sku"},
"identity": {"key": "identity", "type": "ManagedServiceIdentity"},
+ "sku": {"key": "sku", "type": "Sku"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "application_insights": {"key": "properties.applicationInsights", "type": "str"},
+ "container_registry": {"key": "properties.containerRegistry", "type": "str"},
"description": {"key": "properties.description", "type": "str"},
+ "enable_data_isolation": {"key": "properties.enableDataIsolation", "type": "bool"},
+ "encryption": {"key": "properties.encryption", "type": "EncryptionUpdateProperties"},
+ "feature_store_settings": {"key": "properties.featureStoreSettings", "type": "FeatureStoreSettings"},
"friendly_name": {"key": "properties.friendlyName", "type": "str"},
"image_build_compute": {"key": "properties.imageBuildCompute", "type": "str"},
+ "managed_network": {"key": "properties.managedNetwork", "type": "ManagedNetworkSettings"},
+ "primary_user_assigned_identity": {"key": "properties.primaryUserAssignedIdentity", "type": "str"},
+ "public_network_access": {"key": "properties.publicNetworkAccess", "type": "str"},
+ "serverless_compute_custom_subnet": {"key": "properties.serverlessComputeCustomSubnet", "type": "str"},
+ "serverless_compute_no_public_ip": {"key": "properties.serverlessComputeNoPublicIP", "type": "bool"},
"service_managed_resources_settings": {
"key": "properties.serviceManagedResourcesSettings",
"type": "ServiceManagedResourcesSettings",
},
- "primary_user_assigned_identity": {"key": "properties.primaryUserAssignedIdentity", "type": "str"},
- "public_network_access": {"key": "properties.publicNetworkAccess", "type": "str"},
- "application_insights": {"key": "properties.applicationInsights", "type": "str"},
- "container_registry": {"key": "properties.containerRegistry", "type": "str"},
+ "soft_delete_retention_in_days": {"key": "properties.softDeleteRetentionInDays", "type": "int"},
+ "v1_legacy_mode": {"key": "properties.v1LegacyMode", "type": "bool"},
}
def __init__(
self,
*,
- tags: Optional[Dict[str, str]] = None,
- sku: Optional["_models.Sku"] = None,
identity: Optional["_models.ManagedServiceIdentity"] = None,
+ sku: Optional["_models.Sku"] = None,
+ tags: Optional[Dict[str, str]] = None,
+ application_insights: Optional[str] = None,
+ container_registry: Optional[str] = None,
description: Optional[str] = None,
+ enable_data_isolation: Optional[bool] = None,
+ encryption: Optional["_models.EncryptionUpdateProperties"] = None,
+ feature_store_settings: Optional["_models.FeatureStoreSettings"] = None,
friendly_name: Optional[str] = None,
image_build_compute: Optional[str] = None,
- service_managed_resources_settings: Optional["_models.ServiceManagedResourcesSettings"] = None,
+ managed_network: Optional["_models.ManagedNetworkSettings"] = None,
primary_user_assigned_identity: Optional[str] = None,
- public_network_access: Optional[Union[str, "_models.PublicNetworkAccess"]] = None,
- application_insights: Optional[str] = None,
- container_registry: Optional[str] = None,
+ public_network_access: Optional[Union[str, "_models.PublicNetworkAccessType"]] = None,
+ serverless_compute_custom_subnet: Optional[str] = None,
+ serverless_compute_no_public_ip: Optional[bool] = None,
+ service_managed_resources_settings: Optional["_models.ServiceManagedResourcesSettings"] = None,
+ soft_delete_retention_in_days: Optional[int] = None,
+ v1_legacy_mode: Optional[bool] = None,
**kwargs: Any
) -> None:
"""
+ :keyword identity: Managed service identity (system assigned and/or user assigned identities).
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :keyword sku: Optional. This field is required to be implemented by the RP because AML is
+ supporting more than one tier.
+ :paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
:keyword tags: The resource tags for the machine learning workspace.
:paramtype tags: dict[str, str]
- :keyword sku: The sku of the workspace.
- :paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
- :keyword identity: The identity of the resource.
- :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :keyword application_insights: ARM id of the application insights associated with this
+ workspace.
+ :paramtype application_insights: str
+ :keyword container_registry: ARM id of the container registry associated with this workspace.
+ :paramtype container_registry: str
:keyword description: The description of this workspace.
:paramtype description: str
- :keyword friendly_name: The friendly name for this workspace.
+ :keyword enable_data_isolation:
+ :paramtype enable_data_isolation: bool
+ :keyword encryption:
+ :paramtype encryption: ~azure.mgmt.machinelearningservices.models.EncryptionUpdateProperties
+ :keyword feature_store_settings: Settings for feature store type workspace.
+ :paramtype feature_store_settings:
+ ~azure.mgmt.machinelearningservices.models.FeatureStoreSettings
+ :keyword friendly_name: The friendly name for this workspace. This name is mutable.
:paramtype friendly_name: str
:keyword image_build_compute: The compute name for image build.
:paramtype image_build_compute: str
- :keyword service_managed_resources_settings: The service managed resource settings.
- :paramtype service_managed_resources_settings:
- ~azure.mgmt.machinelearningservices.models.ServiceManagedResourcesSettings
+ :keyword managed_network: Managed Network settings for a machine learning workspace.
+ :paramtype managed_network: ~azure.mgmt.machinelearningservices.models.ManagedNetworkSettings
:keyword primary_user_assigned_identity: The user assigned identity resource id that represents
the workspace identity.
:paramtype primary_user_assigned_identity: str
:keyword public_network_access: Whether requests from Public Network are allowed. Known values
are: "Enabled" and "Disabled".
:paramtype public_network_access: str or
- ~azure.mgmt.machinelearningservices.models.PublicNetworkAccess
- :keyword application_insights: ARM id of the application insights associated with this
- workspace.
- :paramtype application_insights: str
- :keyword container_registry: ARM id of the container registry associated with this workspace.
- :paramtype container_registry: str
+ ~azure.mgmt.machinelearningservices.models.PublicNetworkAccessType
+ :keyword serverless_compute_custom_subnet: The resource ID of an existing virtual network
+ subnet in which serverless compute nodes should be deployed.
+ :paramtype serverless_compute_custom_subnet: str
+ :keyword serverless_compute_no_public_ip: The flag to signal if serverless compute nodes
+ deployed in custom vNet would have no public IP addresses for a workspace with private
+ endpoint.
+ :paramtype serverless_compute_no_public_ip: bool
+ :keyword service_managed_resources_settings: The service managed resource settings.
+ :paramtype service_managed_resources_settings:
+ ~azure.mgmt.machinelearningservices.models.ServiceManagedResourcesSettings
+ :keyword soft_delete_retention_in_days: Retention time in days after the workspace gets soft
+ deleted.
+ :paramtype soft_delete_retention_in_days: int
+ :keyword v1_legacy_mode: Enabling v1_legacy_mode may prevent you from using features provided
+ by the v2 API.
+ :paramtype v1_legacy_mode: bool
"""
super().__init__(**kwargs)
- self.tags = tags
- self.sku = sku
self.identity = identity
+ self.sku = sku
+ self.tags = tags
+ self.application_insights = application_insights
+ self.container_registry = container_registry
self.description = description
+ self.enable_data_isolation = enable_data_isolation
+ self.encryption = encryption
+ self.feature_store_settings = feature_store_settings
self.friendly_name = friendly_name
self.image_build_compute = image_build_compute
- self.service_managed_resources_settings = service_managed_resources_settings
+ self.managed_network = managed_network
self.primary_user_assigned_identity = primary_user_assigned_identity
self.public_network_access = public_network_access
- self.application_insights = application_insights
- self.container_registry = container_registry
+ self.serverless_compute_custom_subnet = serverless_compute_custom_subnet
+ self.serverless_compute_no_public_ip = serverless_compute_no_public_ip
+ self.service_managed_resources_settings = service_managed_resources_settings
+ self.soft_delete_retention_in_days = soft_delete_retention_in_days
+ self.v1_legacy_mode = v1_legacy_mode
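
WorkspaceUpdateParameters now mirrors the preview-only knobs of the full model (managed network, serverless compute, encryption update, v1_legacy_mode). A hedged sketch of a PATCH-style update, assuming the workspaces operation group exposes begin_update with the usual (resource_group_name, workspace_name, parameters) shape and reusing the client from the earlier sketch.

    from azure.mgmt.machinelearningservices.models import (
        ManagedNetworkSettings,
        WorkspaceUpdateParameters,
    )

    update = WorkspaceUpdateParameters(
        description="updated against the 2023-08-01-preview surface",
        # isolation_mode value is an assumption, not shown in this diff.
        managed_network=ManagedNetworkSettings(isolation_mode="AllowOnlyApprovedOutbound"),
        serverless_compute_no_public_ip=True,
        v1_legacy_mode=False,
    )

    # Assumed LRO call; verify the exact method name and signature in WorkspacesOperations.
    updated = client.workspaces.begin_update("my-rg", "my-workspace", update).result()
    print(updated.managed_network.isolation_mode if updated.managed_network else None)
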
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/__init__.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/__init__.py
index 4967e3af6930..1421ad90d913 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/__init__.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/__init__.py
@@ -6,15 +6,11 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
-from ._operations import Operations
-from ._workspaces_operations import WorkspacesOperations
from ._usages_operations import UsagesOperations
from ._virtual_machine_sizes_operations import VirtualMachineSizesOperations
from ._quotas_operations import QuotasOperations
from ._compute_operations import ComputeOperations
-from ._private_endpoint_connections_operations import PrivateEndpointConnectionsOperations
-from ._private_link_resources_operations import PrivateLinkResourcesOperations
-from ._workspace_connections_operations import WorkspaceConnectionsOperations
+from ._capacity_reservation_groups_operations import CapacityReservationGroupsOperations
from ._registry_code_containers_operations import RegistryCodeContainersOperations
from ._registry_code_versions_operations import RegistryCodeVersionsOperations
from ._registry_component_containers_operations import RegistryComponentContainersOperations
@@ -36,29 +32,39 @@
from ._datastores_operations import DatastoresOperations
from ._environment_containers_operations import EnvironmentContainersOperations
from ._environment_versions_operations import EnvironmentVersionsOperations
+from ._featureset_containers_operations import FeaturesetContainersOperations
+from ._features_operations import FeaturesOperations
+from ._featureset_versions_operations import FeaturesetVersionsOperations
+from ._featurestore_entity_containers_operations import FeaturestoreEntityContainersOperations
+from ._featurestore_entity_versions_operations import FeaturestoreEntityVersionsOperations
from ._jobs_operations import JobsOperations
+from ._labeling_jobs_operations import LabelingJobsOperations
from ._model_containers_operations import ModelContainersOperations
from ._model_versions_operations import ModelVersionsOperations
from ._online_endpoints_operations import OnlineEndpointsOperations
from ._online_deployments_operations import OnlineDeploymentsOperations
from ._schedules_operations import SchedulesOperations
+from ._serverless_endpoints_operations import ServerlessEndpointsOperations
from ._registries_operations import RegistriesOperations
from ._workspace_features_operations import WorkspaceFeaturesOperations
+from ._operations import Operations
+from ._workspaces_operations import WorkspacesOperations
+from ._workspace_connections_operations import WorkspaceConnectionsOperations
+from ._managed_network_settings_rule_operations import ManagedNetworkSettingsRuleOperations
+from ._private_endpoint_connections_operations import PrivateEndpointConnectionsOperations
+from ._private_link_resources_operations import PrivateLinkResourcesOperations
+from ._managed_network_provisions_operations import ManagedNetworkProvisionsOperations
from ._patch import __all__ as _patch_all
from ._patch import * # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
- "Operations",
- "WorkspacesOperations",
"UsagesOperations",
"VirtualMachineSizesOperations",
"QuotasOperations",
"ComputeOperations",
- "PrivateEndpointConnectionsOperations",
- "PrivateLinkResourcesOperations",
- "WorkspaceConnectionsOperations",
+ "CapacityReservationGroupsOperations",
"RegistryCodeContainersOperations",
"RegistryCodeVersionsOperations",
"RegistryComponentContainersOperations",
@@ -80,14 +86,28 @@
"DatastoresOperations",
"EnvironmentContainersOperations",
"EnvironmentVersionsOperations",
+ "FeaturesetContainersOperations",
+ "FeaturesOperations",
+ "FeaturesetVersionsOperations",
+ "FeaturestoreEntityContainersOperations",
+ "FeaturestoreEntityVersionsOperations",
"JobsOperations",
+ "LabelingJobsOperations",
"ModelContainersOperations",
"ModelVersionsOperations",
"OnlineEndpointsOperations",
"OnlineDeploymentsOperations",
"SchedulesOperations",
+ "ServerlessEndpointsOperations",
"RegistriesOperations",
"WorkspaceFeaturesOperations",
+ "Operations",
+ "WorkspacesOperations",
+ "WorkspaceConnectionsOperations",
+ "ManagedNetworkSettingsRuleOperations",
+ "PrivateEndpointConnectionsOperations",
+ "PrivateLinkResourcesOperations",
+ "ManagedNetworkProvisionsOperations",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
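
The reordered operations/__init__.py is where the new preview operation groups (feature store, labeling jobs, serverless endpoints, managed network, capacity reservation groups) first surface. The classes below are exactly the ones added to __all__; the sketch scans a client instance for them rather than guessing the snake_case attribute names on the client, which this diff does not show.

    from azure.mgmt.machinelearningservices import operations as ml_ops

    NEW_GROUPS = (
        ml_ops.CapacityReservationGroupsOperations,
        ml_ops.FeaturesOperations,
        ml_ops.FeaturesetContainersOperations,
        ml_ops.FeaturesetVersionsOperations,
        ml_ops.FeaturestoreEntityContainersOperations,
        ml_ops.FeaturestoreEntityVersionsOperations,
        ml_ops.LabelingJobsOperations,
        ml_ops.ManagedNetworkProvisionsOperations,
        ml_ops.ManagedNetworkSettingsRuleOperations,
        ml_ops.ServerlessEndpointsOperations,
    )

    # ``client`` is the MachineLearningServicesMgmtClient from the earlier sketch; the
    # generated client assigns each operation group to an instance attribute, so vars()
    # reveals the attribute names without hard-coding them.
    print({name: type(group).__name__ for name, group in vars(client).items()
           if isinstance(group, NEW_GROUPS)})
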
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_batch_deployments_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_batch_deployments_operations.py
index 7fcc2d9cfc00..188d3c484b29 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_batch_deployments_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_batch_deployments_operations.py
@@ -30,7 +30,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -53,7 +53,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -72,7 +72,7 @@ def build_list_request(
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -100,7 +100,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -120,7 +120,7 @@ def build_delete_request(
"deploymentName": _SERIALIZER.url("deployment_name", deployment_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -142,7 +142,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -162,7 +162,7 @@ def build_get_request(
"deploymentName": _SERIALIZER.url("deployment_name", deployment_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -184,7 +184,7 @@ def build_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -209,7 +209,7 @@ def build_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -233,7 +233,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -258,7 +258,7 @@ def build_create_or_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
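
The recurring mechanical change in these operation files swaps the vendored _format_url_section helper for a direct str.format on the path template, after every argument has been validated and percent-encoded by the Serializer. A standalone sketch of the equivalent behaviour, with urllib.parse.quote standing in for the _SERIALIZER.url(...) step.

    from urllib.parse import quote

    template = (
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}"
        "/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
        "/batchEndpoints/{endpointName}/deployments"
    )

    # Each value is encoded before substitution, mirroring _SERIALIZER.url(...).
    path_format_arguments = {
        "subscriptionId": quote("00000000-0000-0000-0000-000000000000"),
        "resourceGroupName": quote("my-rg"),
        "workspaceName": quote("my workspace"),  # the space shows the encoding
        "endpointName": quote("my-endpoint"),
    }

    print(template.format(**path_format_arguments))
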
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_batch_endpoints_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_batch_endpoints_operations.py
index afa2aebfed20..b8b914a193a4 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_batch_endpoints_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_batch_endpoints_operations.py
@@ -30,7 +30,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -51,7 +51,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -69,7 +69,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -90,7 +90,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -109,7 +109,7 @@ def build_delete_request(
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -126,7 +126,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -145,7 +145,7 @@ def build_get_request(
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -162,7 +162,7 @@ def build_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -184,7 +184,7 @@ def build_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -203,7 +203,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -225,7 +225,7 @@ def build_create_or_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -244,7 +244,7 @@ def build_list_keys_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -263,7 +263,7 @@ def build_list_keys_request(
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
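
Every request builder here now defaults api-version to 2023-08-01-preview but still pops api_version from kwargs first, so the version can be pinned per client or per call. A hedged sketch of pinning the previous stable version; whether the service accepts a given mix of versions is not something this diff establishes.

    from azure.identity import DefaultAzureCredential
    from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

    # Pin the whole client to the previous stable api-version.
    stable_client = MachineLearningServicesMgmtClient(
        DefaultAzureCredential(),
        "00000000-0000-0000-0000-000000000000",
        api_version="2023-04-01",
    )

    # Or override a single call; the builders pop ``api_version`` from **kwargs before
    # falling back to the 2023-08-01-preview default.
    for endpoint in stable_client.batch_endpoints.list(
        "my-rg", "my-workspace", api_version="2023-04-01"
    ):
        print(endpoint.name)
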
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_capacity_reservation_groups_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_capacity_reservation_groups_operations.py
new file mode 100644
index 000000000000..178b8ad90f39
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_capacity_reservation_groups_operations.py
@@ -0,0 +1,850 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
+import urllib.parse
+
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import HttpResponse
+from azure.core.rest import HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from .. import models as _models
+from .._serialization import Serializer
+from .._vendor import _convert_request
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_capacity_reservation_groups_list_by_subscription_request(
+ subscription_id: str, *, skip: Optional[str] = None, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/capacityReserverationGroups",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+ if skip is not None:
+ _params["$skip"] = _SERIALIZER.query("skip", skip, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_list_request(
+ resource_group_name: str, subscription_id: str, *, skip: Optional[str] = None, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/capacityReserverationGroups",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+ if skip is not None:
+ _params["$skip"] = _SERIALIZER.query("skip", skip, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(resource_group_name: str, group_id: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/capacityReserverationGroups/{groupId}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "groupId": _SERIALIZER.url("group_id", group_id, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_request(resource_group_name: str, group_id: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/capacityReserverationGroups/{groupId}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "groupId": _SERIALIZER.url("group_id", group_id, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_update_request(resource_group_name: str, group_id: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/capacityReserverationGroups/{groupId}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "groupId": _SERIALIZER.url("group_id", group_id, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_create_or_update_request(
+ resource_group_name: str, group_id: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/capacityReserverationGroups/{groupId}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "groupId": _SERIALIZER.url("group_id", group_id, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class CapacityReservationGroupsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.MachineLearningServicesMgmtClient`'s
+ :attr:`capacity_reservation_groups` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs):
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def capacity_reservation_groups_list_by_subscription(
+ self, skip: Optional[str] = None, **kwargs: Any
+ ) -> Iterable["_models.CapacityReservationGroup"]:
+ """capacity_reservation_groups_list_by_subscription.
+
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either CapacityReservationGroup or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.CapacityReservationGroup]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.CapacityReservationGroupTrackedResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ request = build_capacity_reservation_groups_list_by_subscription_request(
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ api_version=api_version,
+ template_url=self.capacity_reservation_groups_list_by_subscription.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
+
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize(
+ "CapacityReservationGroupTrackedResourceArmPaginatedResult", pipeline_response
+ )
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(get_next, extract_data)
+
+ capacity_reservation_groups_list_by_subscription.metadata = {
+ "url": "/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/capacityReserverationGroups"
+ }
+
+ @distributed_trace
+ def list(
+ self, resource_group_name: str, skip: Optional[str] = None, **kwargs: Any
+ ) -> Iterable["_models.CapacityReservationGroup"]:
+ """list.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either CapacityReservationGroup or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.CapacityReservationGroup]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.CapacityReservationGroupTrackedResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ request = build_list_request(
+ resource_group_name=resource_group_name,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ api_version=api_version,
+ template_url=self.list.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
+
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize(
+ "CapacityReservationGroupTrackedResourceArmPaginatedResult", pipeline_response
+ )
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(get_next, extract_data)
+
+ list.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/capacityReserverationGroups"
+ }
+
+ @distributed_trace
+ def delete( # pylint: disable=inconsistent-return-statements
+ self, resource_group_name: str, group_id: str, **kwargs: Any
+ ) -> None:
+ """delete.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param group_id: Required.
+ :type group_id: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ request = build_delete_request(
+ resource_group_name=resource_group_name,
+ group_id=group_id,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.delete.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202, 204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ delete.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/capacityReserverationGroups/{groupId}"
+ }
+
+ @distributed_trace
+ def get(self, resource_group_name: str, group_id: str, **kwargs: Any) -> _models.CapacityReservationGroup:
+ """get.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param group_id: Required.
+ :type group_id: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: CapacityReservationGroup or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.CapacityReservationGroup
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.CapacityReservationGroup] = kwargs.pop("cls", None)
+
+ request = build_get_request(
+ resource_group_name=resource_group_name,
+ group_id=group_id,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.get.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("CapacityReservationGroup", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ get.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/capacityReserverationGroups/{groupId}"
+ }
+
+ @overload
+ def update( # pylint: disable=inconsistent-return-statements
+ self,
+ resource_group_name: str,
+ group_id: str,
+ body: _models.PartialMinimalTrackedResourceWithSkuAndIdentity,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> None:
+ """update.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param group_id: Required.
+ :type group_id: str
+ :param body: Required.
+ :type body:
+ ~azure.mgmt.machinelearningservices.models.PartialMinimalTrackedResourceWithSkuAndIdentity
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def update( # pylint: disable=inconsistent-return-statements
+ self,
+ resource_group_name: str,
+ group_id: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> None:
+ """update.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param group_id: Required.
+ :type group_id: str
+ :param body: Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def update( # pylint: disable=inconsistent-return-statements
+ self,
+ resource_group_name: str,
+ group_id: str,
+ body: Union[_models.PartialMinimalTrackedResourceWithSkuAndIdentity, IO],
+ **kwargs: Any
+ ) -> None:
+ """update.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param group_id: Required.
+ :type group_id: str
+ :param body: Is either a PartialMinimalTrackedResourceWithSkuAndIdentity type or a IO type.
+ Required.
+ :type body:
+ ~azure.mgmt.machinelearningservices.models.PartialMinimalTrackedResourceWithSkuAndIdentity or
+ IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "PartialMinimalTrackedResourceWithSkuAndIdentity")
+
+ request = build_update_request(
+ resource_group_name=resource_group_name,
+ group_id=group_id,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self.update.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ update.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/capacityReserverationGroups/{groupId}"
+ }
+
+ @overload
+ def create_or_update(
+ self,
+ resource_group_name: str,
+ group_id: str,
+ body: _models.CapacityReservationGroup,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.CapacityReservationGroup:
+ """create_or_update.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param group_id: Required.
+ :type group_id: str
+ :param body: Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.CapacityReservationGroup
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: CapacityReservationGroup or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.CapacityReservationGroup
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def create_or_update(
+ self,
+ resource_group_name: str,
+ group_id: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.CapacityReservationGroup:
+ """create_or_update.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param group_id: Required.
+ :type group_id: str
+ :param body: Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: CapacityReservationGroup or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.CapacityReservationGroup
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def create_or_update(
+ self, resource_group_name: str, group_id: str, body: Union[_models.CapacityReservationGroup, IO], **kwargs: Any
+ ) -> _models.CapacityReservationGroup:
+ """create_or_update.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param group_id: Required.
+ :type group_id: str
+ :param body: Is either a CapacityReservationGroup type or a IO type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.CapacityReservationGroup or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: CapacityReservationGroup or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.CapacityReservationGroup
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.CapacityReservationGroup] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "CapacityReservationGroup")
+
+ request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ group_id=group_id,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self.create_or_update.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ if response.status_code == 200:
+ deserialized = self._deserialize("CapacityReservationGroup", pipeline_response)
+
+ if response.status_code == 201:
+ deserialized = self._deserialize("CapacityReservationGroup", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ create_or_update.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/capacityReserverationGroups/{groupId}"
+ }
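A minimal usage sketch for the new capacity reservation group operations added above, assuming a MachineLearningServicesMgmtClient constructed with azure-identity's DefaultAzureCredential; the subscription ID, resource group, and group ID values are placeholders, not taken from this change:

    from azure.identity import DefaultAzureCredential
    from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

    # Placeholder identifiers; substitute real values.
    client = MachineLearningServicesMgmtClient(
        credential=DefaultAzureCredential(),
        subscription_id="<subscription-id>",
    )

    # list() returns an ItemPaged iterator over CapacityReservationGroup resources.
    for group in client.capacity_reservation_groups.list(resource_group_name="<resource-group>"):
        print(group.name)

    # Fetch a single group by its ID, then delete it.
    group = client.capacity_reservation_groups.get(
        resource_group_name="<resource-group>", group_id="<group-id>"
    )
    client.capacity_reservation_groups.delete(
        resource_group_name="<resource-group>", group_id="<group-id>"
    )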
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_code_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_code_containers_operations.py
index bd3a7ea7d400..9c6b9fa84aaa 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_code_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_code_containers_operations.py
@@ -28,7 +28,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -43,7 +43,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -61,7 +61,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -80,7 +80,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -99,7 +99,7 @@ def build_delete_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -116,7 +116,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -135,7 +135,7 @@ def build_get_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -152,7 +152,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -172,7 +172,7 @@ def build_create_or_update_request(
"name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_code_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_code_versions_operations.py
index 85aa291231f0..ca2653f0bf77 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_code_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_code_versions_operations.py
@@ -28,7 +28,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -53,7 +53,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -72,7 +72,7 @@ def build_list_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -99,7 +99,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -119,7 +119,7 @@ def build_delete_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -136,7 +136,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -156,7 +156,7 @@ def build_get_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -173,7 +173,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -194,7 +194,7 @@ def build_create_or_update_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -213,7 +213,7 @@ def build_create_or_get_start_pending_upload_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -234,7 +234,7 @@ def build_create_or_get_start_pending_upload_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_component_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_component_containers_operations.py
index 0bc38057f344..3e3abda2fb72 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_component_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_component_containers_operations.py
@@ -28,7 +28,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -49,7 +49,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -67,7 +67,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -88,7 +88,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -107,7 +107,7 @@ def build_delete_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -124,7 +124,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -143,7 +143,7 @@ def build_get_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -160,7 +160,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -180,7 +180,7 @@ def build_create_or_update_request(
"name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_component_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_component_versions_operations.py
index 932e842e43de..4c2c245686e4 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_component_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_component_versions_operations.py
@@ -28,7 +28,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -47,12 +47,13 @@ def build_list_request(
top: Optional[int] = None,
skip: Optional[str] = None,
list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ stage: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -71,7 +72,7 @@ def build_list_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -83,6 +84,8 @@ def build_list_request(
_params["$skip"] = _SERIALIZER.query("skip", skip, "str")
if list_view_type is not None:
_params["listViewType"] = _SERIALIZER.query("list_view_type", list_view_type, "str")
+ if stage is not None:
+ _params["stage"] = _SERIALIZER.query("stage", stage, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
@@ -96,7 +99,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -116,7 +119,7 @@ def build_delete_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -133,7 +136,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -153,7 +156,7 @@ def build_get_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -170,7 +173,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -191,7 +194,7 @@ def build_create_or_update_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -233,6 +236,7 @@ def list(
top: Optional[int] = None,
skip: Optional[str] = None,
list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ stage: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.ComponentVersion"]:
"""List component versions.
@@ -255,6 +259,8 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+ :param stage: Component stage. Default value is None.
+ :type stage: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ComponentVersion or the result of cls(response)
:rtype:
@@ -287,6 +293,7 @@ def prepare_request(next_link=None):
top=top,
skip=skip,
list_view_type=list_view_type,
+ stage=stage,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
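The new stage filter on ComponentVersionsOperations.list can be passed straight through from client code. A small sketch, reusing a MachineLearningServicesMgmtClient like the one constructed in the earlier sketch and assuming the usual resource group / workspace / component name parameters; the "Production" stage string is only an illustrative assumption, as the service defines which stage values are meaningful:

    # Assumes `client` is a MachineLearningServicesMgmtClient as constructed above.
    versions = client.component_versions.list(
        resource_group_name="<resource-group>",
        workspace_name="<workspace-name>",
        name="<component-name>",
        stage="Production",  # illustrative stage value
    )
    for version in versions:
        print(version.name)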
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_compute_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_compute_operations.py
index 349245e851f3..f97c30107af4 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_compute_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_compute_operations.py
@@ -7,7 +7,7 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+from typing import Any, Callable, Dict, IO, Iterable, List, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -30,7 +30,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -45,7 +45,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -63,7 +63,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -82,7 +82,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -98,10 +98,12 @@ def build_get_request(
"workspaceName": _SERIALIZER.url(
"workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
),
- "computeName": _SERIALIZER.url("compute_name", compute_name, "str"),
+ "computeName": _SERIALIZER.url(
+ "compute_name", compute_name, "str", pattern=r"^[a-zA-Z](?![a-zA-Z0-9-]*-\d+$)[a-zA-Z0-9\-]{2,23}$"
+ ),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -118,7 +120,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -135,10 +137,12 @@ def build_create_or_update_request(
"workspaceName": _SERIALIZER.url(
"workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
),
- "computeName": _SERIALIZER.url("compute_name", compute_name, "str"),
+ "computeName": _SERIALIZER.url(
+ "compute_name", compute_name, "str", pattern=r"^[a-zA-Z](?![a-zA-Z0-9-]*-\d+$)[a-zA-Z0-9\-]{2,23}$"
+ ),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -157,7 +161,7 @@ def build_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -174,10 +178,12 @@ def build_update_request(
"workspaceName": _SERIALIZER.url(
"workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
),
- "computeName": _SERIALIZER.url("compute_name", compute_name, "str"),
+ "computeName": _SERIALIZER.url(
+ "compute_name", compute_name, "str", pattern=r"^[a-zA-Z](?![a-zA-Z0-9-]*-\d+$)[a-zA-Z0-9\-]{2,23}$"
+ ),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -202,7 +208,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -218,10 +224,12 @@ def build_delete_request(
"workspaceName": _SERIALIZER.url(
"workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
),
- "computeName": _SERIALIZER.url("compute_name", compute_name, "str"),
+ "computeName": _SERIALIZER.url(
+ "compute_name", compute_name, "str", pattern=r"^[a-zA-Z](?![a-zA-Z0-9-]*-\d+$)[a-zA-Z0-9\-]{2,23}$"
+ ),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -235,13 +243,54 @@ def build_delete_request(
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+def build_update_custom_services_request(
+ resource_group_name: str, workspace_name: str, compute_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/customServices",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "computeName": _SERIALIZER.url(
+ "compute_name", compute_name, "str", pattern=r"^[a-zA-Z](?![a-zA-Z0-9-]*-\d+$)[a-zA-Z0-9\-]{2,23}$"
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
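The tightened computeName constraint applied throughout these builders can be checked locally before a request is ever sent. A minimal sketch reusing only the regular expression taken from the builders above; the sample names are invented for illustration:

import re

# Pattern copied from the generated request builders: must start with a letter,
# be 3-24 characters of letters/digits/hyphens, and must not end in "-<digits>".
COMPUTE_NAME_PATTERN = re.compile(r"^[a-zA-Z](?![a-zA-Z0-9-]*-\d+$)[a-zA-Z0-9\-]{2,23}$")

def is_valid_compute_name(name: str) -> bool:
    """Return True if the name satisfies the computeName path constraint."""
    return COMPUTE_NAME_PATTERN.match(name) is not None

# Hypothetical names, used only to exercise the pattern.
print(is_valid_compute_name("gpu-cluster"))     # True
print(is_valid_compute_name("gpu-cluster-01"))  # False: ends in "-<digits>"
print(is_valid_compute_name("1node"))           # False: must start with a letter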
def build_list_nodes_request(
resource_group_name: str, workspace_name: str, compute_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -257,10 +306,12 @@ def build_list_nodes_request(
"workspaceName": _SERIALIZER.url(
"workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
),
- "computeName": _SERIALIZER.url("compute_name", compute_name, "str"),
+ "computeName": _SERIALIZER.url(
+ "compute_name", compute_name, "str", pattern=r"^[a-zA-Z](?![a-zA-Z0-9-]*-\d+$)[a-zA-Z0-9\-]{2,23}$"
+ ),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -277,7 +328,7 @@ def build_list_keys_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -293,10 +344,12 @@ def build_list_keys_request(
"workspaceName": _SERIALIZER.url(
"workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
),
- "computeName": _SERIALIZER.url("compute_name", compute_name, "str"),
+ "computeName": _SERIALIZER.url(
+ "compute_name", compute_name, "str", pattern=r"^[a-zA-Z](?![a-zA-Z0-9-]*-\d+$)[a-zA-Z0-9\-]{2,23}$"
+ ),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -313,7 +366,7 @@ def build_start_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -329,10 +382,12 @@ def build_start_request(
"workspaceName": _SERIALIZER.url(
"workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
),
- "computeName": _SERIALIZER.url("compute_name", compute_name, "str"),
+ "computeName": _SERIALIZER.url(
+ "compute_name", compute_name, "str", pattern=r"^[a-zA-Z](?![a-zA-Z0-9-]*-\d+$)[a-zA-Z0-9\-]{2,23}$"
+ ),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -349,7 +404,7 @@ def build_stop_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -365,10 +420,12 @@ def build_stop_request(
"workspaceName": _SERIALIZER.url(
"workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
),
- "computeName": _SERIALIZER.url("compute_name", compute_name, "str"),
+ "computeName": _SERIALIZER.url(
+ "compute_name", compute_name, "str", pattern=r"^[a-zA-Z](?![a-zA-Z0-9-]*-\d+$)[a-zA-Z0-9\-]{2,23}$"
+ ),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -385,7 +442,7 @@ def build_restart_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -401,21 +458,143 @@ def build_restart_request(
"workspaceName": _SERIALIZER.url(
"workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
),
- "computeName": _SERIALIZER.url("compute_name", compute_name, "str"),
+ "computeName": _SERIALIZER.url(
+ "compute_name", compute_name, "str", pattern=r"^[a-zA-Z](?![a-zA-Z0-9-]*-\d+$)[a-zA-Z0-9\-]{2,23}$"
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_update_idle_shutdown_setting_request(
+ resource_group_name: str, workspace_name: str, compute_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/updateIdleShutdownSetting",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "computeName": _SERIALIZER.url(
+ "compute_name", compute_name, "str", pattern=r"^[a-zA-Z](?![a-zA-Z0-9-]*-\d+$)[a-zA-Z0-9\-]{2,23}$"
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
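The builders now expand the URL template with plain str.format once each path value has been validated and encoded by the serializer, instead of going through the removed _format_url_section helper. A rough sketch of that step, with urllib.parse.quote standing in for _SERIALIZER.url (which additionally enforces the patterns shown above) and placeholder resource names:

from urllib.parse import quote

template = (
    "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}"
    "/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
    "/computes/{computeName}/updateIdleShutdownSetting"
)

# Each value is quoted individually so reserved characters cannot break the path.
path_format_arguments = {
    "subscriptionId": quote("00000000-0000-0000-0000-000000000000"),
    "resourceGroupName": quote("my-rg"),
    "workspaceName": quote("my-workspace"),
    "computeName": quote("my-compute"),
}

print(template.format(**path_format_arguments))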
+
+def build_get_allowed_resize_sizes_request(
+ resource_group_name: str, workspace_name: str, compute_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/getAllowedVmSizesForResize",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "computeName": _SERIALIZER.url(
+ "compute_name", compute_name, "str", pattern=r"^[a-zA-Z](?![a-zA-Z0-9-]*-\d+$)[a-zA-Z0-9\-]{2,23}$"
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_resize_request(
+ resource_group_name: str, workspace_name: str, compute_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/resize",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "computeName": _SERIALIZER.url(
+ "compute_name", compute_name, "str", pattern=r"^[a-zA-Z](?![a-zA-Z0-9-]*-\d+$)[a-zA-Z0-9\-]{2,23}$"
+ ),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
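These request builders are internal helpers, but inspecting their output clarifies what the operations send. A sketch under the assumption that they stay importable from azure.mgmt.machinelearningservices.operations._compute_operations as in the rest of this diff; the identifiers are placeholders, and real calls should go through ComputeOperations.begin_resize, which also attaches the serialized body:

from azure.mgmt.machinelearningservices.operations._compute_operations import (
    build_resize_request,
)

# Placeholder identifiers; no body is attached in this sketch.
request = build_resize_request(
    resource_group_name="my-rg",
    workspace_name="my-workspace",
    compute_name="my-compute",
    subscription_id="00000000-0000-0000-0000-000000000000",
)

print(request.method)  # POST
print(request.url)     # .../computes/my-compute/resize?api-version=2023-08-01-preview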
-class ComputeOperations:
+class ComputeOperations: # pylint: disable=too-many-public-methods
"""
.. warning::
**DO NOT** instantiate this class directly.
@@ -1233,11 +1412,18 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}"
}
- @distributed_trace
- def list_nodes(
- self, resource_group_name: str, workspace_name: str, compute_name: str, **kwargs: Any
- ) -> Iterable["_models.AmlComputeNodeInformation"]:
- """Get the details (e.g IP address, port etc) of all the compute nodes in the compute.
+ @overload
+ def update_custom_services( # pylint: disable=inconsistent-return-statements
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ compute_name: str,
+ custom_services: List[_models.CustomService],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> None:
+ """Updates the custom services list. The list of custom services provided shall be overwritten.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
@@ -1246,19 +1432,77 @@ def list_nodes(
:type workspace_name: str
:param compute_name: Name of the Azure Machine Learning compute. Required.
:type compute_name: str
+ :param custom_services: New list of Custom Services. Required.
+ :type custom_services: list[~azure.mgmt.machinelearningservices.models.CustomService]
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
- :return: An iterator like instance of either AmlComputeNodeInformation or the result of
- cls(response)
- :rtype:
- ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.AmlComputeNodeInformation]
+ :return: None or the result of cls(response)
+ :rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- _headers = kwargs.pop("headers", {}) or {}
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[_models.AmlComputeNodesInformation] = kwargs.pop("cls", None)
+ @overload
+ def update_custom_services( # pylint: disable=inconsistent-return-statements
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ compute_name: str,
+ custom_services: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> None:
+ """Updates the custom services list. The list of custom services provided shall be overwritten.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param compute_name: Name of the Azure Machine Learning compute. Required.
+ :type compute_name: str
+ :param custom_services: New list of Custom Services. Required.
+ :type custom_services: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def update_custom_services( # pylint: disable=inconsistent-return-statements
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ compute_name: str,
+ custom_services: Union[List[_models.CustomService], IO],
+ **kwargs: Any
+ ) -> None:
+ """Updates the custom services list. The list of custom services provided shall be overwritten.
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param compute_name: Name of the Azure Machine Learning compute. Required.
+ :type compute_name: str
+ :param custom_services: New list of Custom Services. Is either a [CustomService] type or an IO
+ type. Required.
+ :type custom_services: list[~azure.mgmt.machinelearningservices.models.CustomService] or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
@@ -1267,41 +1511,125 @@ def list_nodes(
}
error_map.update(kwargs.pop("error_map", {}) or {})
- def prepare_request(next_link=None):
- if not next_link:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- request = build_list_nodes_request(
- resource_group_name=resource_group_name,
- workspace_name=workspace_name,
- compute_name=compute_name,
- subscription_id=self._config.subscription_id,
- api_version=api_version,
- template_url=self.list_nodes.metadata["url"],
- headers=_headers,
- params=_params,
- )
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
- else:
- # make call to next link with the client's api-version
- _parsed_next_link = urllib.parse.urlparse(next_link)
- _next_request_params = case_insensitive_dict(
- {
- key: [urllib.parse.quote(v) for v in value]
- for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
- }
- )
- _next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
- "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
- )
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(custom_services, (IOBase, bytes)):
+ _content = custom_services
+ else:
+ _json = self._serialize.body(custom_services, "[CustomService]")
- def extract_data(pipeline_response):
+ request = build_update_custom_services_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ compute_name=compute_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self.update_custom_services.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ update_custom_services.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/customServices"
+ }
+
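A short usage sketch for the new update_custom_services operation. It assumes azure-identity is installed for DefaultAzureCredential; the CustomService field used (name) and every identifier are placeholders rather than a definitive payload:

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
from azure.mgmt.machinelearningservices.models import CustomService

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-0000-0000-0000-000000000000",  # placeholder
)

# Replaces the compute instance's full custom-services list with the one provided;
# the CustomService fields shown here are illustrative, not exhaustive.
client.compute.update_custom_services(
    resource_group_name="my-rg",
    workspace_name="my-workspace",
    compute_name="my-compute",
    custom_services=[CustomService(name="my-service")],
)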
+ @distributed_trace
+ def list_nodes(
+ self, resource_group_name: str, workspace_name: str, compute_name: str, **kwargs: Any
+ ) -> Iterable["_models.AmlComputeNodeInformation"]:
+ """Get the details (e.g IP address, port etc) of all the compute nodes in the compute.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param compute_name: Name of the Azure Machine Learning compute. Required.
+ :type compute_name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either AmlComputeNodeInformation or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.AmlComputeNodeInformation]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.AmlComputeNodesInformation] = kwargs.pop("cls", None)
+
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ request = build_list_nodes_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ compute_name=compute_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.list_nodes.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
+
+ def extract_data(pipeline_response):
deserialized = self._deserialize("AmlComputeNodesInformation", pipeline_response)
list_of_elem = deserialized.nodes
if cls:
@@ -1751,3 +2079,452 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
begin_restart.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/restart"
}
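The re-emitted list_nodes above still returns an ItemPaged iterator that follows next links transparently, re-applying the client's api-version on each continuation call. A usage sketch with placeholder identifiers; the AmlComputeNodeInformation attribute names are assumed from the model:

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-0000-0000-0000-000000000000",  # placeholder
)

# Iteration lazily fetches pages; each item is an AmlComputeNodeInformation.
for node in client.compute.list_nodes("my-rg", "my-workspace", "my-compute"):
    print(node.node_id, node.private_ip_address, node.node_state)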
+
+ @overload
+ def update_idle_shutdown_setting( # pylint: disable=inconsistent-return-statements
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ compute_name: str,
+ parameters: _models.IdleShutdownSetting,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> None:
+ """Updates the idle shutdown setting of a compute instance.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param compute_name: Name of the Azure Machine Learning compute. Required.
+ :type compute_name: str
+ :param parameters: The object for updating idle shutdown setting of specified ComputeInstance.
+ Required.
+ :type parameters: ~azure.mgmt.machinelearningservices.models.IdleShutdownSetting
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def update_idle_shutdown_setting( # pylint: disable=inconsistent-return-statements
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ compute_name: str,
+ parameters: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> None:
+ """Updates the idle shutdown setting of a compute instance.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param compute_name: Name of the Azure Machine Learning compute. Required.
+ :type compute_name: str
+ :param parameters: The object for updating idle shutdown setting of specified ComputeInstance.
+ Required.
+ :type parameters: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def update_idle_shutdown_setting( # pylint: disable=inconsistent-return-statements
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ compute_name: str,
+ parameters: Union[_models.IdleShutdownSetting, IO],
+ **kwargs: Any
+ ) -> None:
+ """Updates the idle shutdown setting of a compute instance.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param compute_name: Name of the Azure Machine Learning compute. Required.
+ :type compute_name: str
+ :param parameters: The object for updating idle shutdown setting of specified ComputeInstance.
+ Is either an IdleShutdownSetting type or an IO type. Required.
+ :type parameters: ~azure.mgmt.machinelearningservices.models.IdleShutdownSetting or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(parameters, (IOBase, bytes)):
+ _content = parameters
+ else:
+ _json = self._serialize.body(parameters, "IdleShutdownSetting")
+
+ request = build_update_idle_shutdown_setting_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ compute_name=compute_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self.update_idle_shutdown_setting.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ update_idle_shutdown_setting.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/updateIdleShutdownSetting"
+ }
+
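A usage sketch for update_idle_shutdown_setting. The idle_time_before_shutdown field name is assumed from the IdleShutdownSetting model, the duration is an ISO 8601 string, and all identifiers are placeholders:

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
from azure.mgmt.machinelearningservices.models import IdleShutdownSetting

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-0000-0000-0000-000000000000",  # placeholder
)

# "PT30M" shuts the compute instance down after 30 idle minutes (assumed field name).
client.compute.update_idle_shutdown_setting(
    resource_group_name="my-rg",
    workspace_name="my-workspace",
    compute_name="my-compute",
    parameters=IdleShutdownSetting(idle_time_before_shutdown="PT30M"),
)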
+ @distributed_trace
+ def get_allowed_resize_sizes(
+ self, resource_group_name: str, workspace_name: str, compute_name: str, **kwargs: Any
+ ) -> _models.VirtualMachineSizeListResult:
+ """Returns supported virtual machine sizes for resize.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param compute_name: Name of the Azure Machine Learning compute. Required.
+ :type compute_name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: VirtualMachineSizeListResult or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.VirtualMachineSizeListResult
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.VirtualMachineSizeListResult] = kwargs.pop("cls", None)
+
+ request = build_get_allowed_resize_sizes_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ compute_name=compute_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.get_allowed_resize_sizes.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("VirtualMachineSizeListResult", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ get_allowed_resize_sizes.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/getAllowedVmSizesForResize"
+ }
+
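A usage sketch for get_allowed_resize_sizes. The value attribute on VirtualMachineSizeListResult and the name attribute on its entries are assumptions taken from the model names used in this module; identifiers are placeholders:

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-0000-0000-0000-000000000000",  # placeholder
)

sizes = client.compute.get_allowed_resize_sizes("my-rg", "my-workspace", "my-compute")
# Assumed: the result exposes the supported sizes on its .value list.
for size in sizes.value or []:
    print(size.name)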
+ def _resize_initial( # pylint: disable=inconsistent-return-statements
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ compute_name: str,
+ parameters: Union[_models.ResizeSchema, IO],
+ **kwargs: Any
+ ) -> None:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(parameters, (IOBase, bytes)):
+ _content = parameters
+ else:
+ _json = self._serialize.body(parameters, "ResizeSchema")
+
+ request = build_resize_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ compute_name=compute_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._resize_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ _resize_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/resize"
+ }
+
+ @overload
+ def begin_resize(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ compute_name: str,
+ parameters: _models.ResizeSchema,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Updates the size of a Compute Instance.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param compute_name: Name of the Azure Machine Learning compute. Required.
+ :type compute_name: str
+ :param parameters: The object for updating VM size setting of specified Compute Instance.
+ Required.
+ :type parameters: ~azure.mgmt.machinelearningservices.models.ResizeSchema
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_resize(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ compute_name: str,
+ parameters: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Updates the size of a Compute Instance.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param compute_name: Name of the Azure Machine Learning compute. Required.
+ :type compute_name: str
+ :param parameters: The object for updating VM size setting of specified Compute Instance.
+ Required.
+ :type parameters: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_resize(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ compute_name: str,
+ parameters: Union[_models.ResizeSchema, IO],
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Updates the size of a Compute Instance.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param compute_name: Name of the Azure Machine Learning compute. Required.
+ :type compute_name: str
+ :param parameters: The object for updating VM size setting of specified Compute Instance. Is
+ either a ResizeSchema type or an IO type. Required.
+ :type parameters: ~azure.mgmt.machinelearningservices.models.ResizeSchema or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._resize_initial( # type: ignore
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ compute_name=compute_name,
+ parameters=parameters,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ if polling is True:
+ polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_resize.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/resize"
+ }
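begin_resize returns an LROPoller driven by ARMPolling, as the implementation above shows. A usage sketch with a placeholder VM size; the target_vm_size field name on ResizeSchema is an assumption, and all identifiers are placeholders:

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
from azure.mgmt.machinelearningservices.models import ResizeSchema

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-0000-0000-0000-000000000000",  # placeholder
)

poller = client.compute.begin_resize(
    resource_group_name="my-rg",
    workspace_name="my-workspace",
    compute_name="my-compute",
    parameters=ResizeSchema(target_vm_size="Standard_DS3_v2"),  # assumed field name
)
poller.result()  # blocks until the long-running operation completes; returns None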
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_data_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_data_containers_operations.py
index 48c5dee07f2c..db399057899d 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_data_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_data_containers_operations.py
@@ -28,7 +28,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -49,7 +49,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -67,7 +67,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -88,7 +88,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -107,7 +107,7 @@ def build_delete_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -124,7 +124,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -143,7 +143,7 @@ def build_get_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -160,7 +160,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -180,7 +180,7 @@ def build_create_or_update_request(
"name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_data_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_data_versions_operations.py
index e355736e9350..8fdd1c819812 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_data_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_data_versions_operations.py
@@ -28,7 +28,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -48,12 +48,13 @@ def build_list_request(
skip: Optional[str] = None,
tags: Optional[str] = None,
list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ stage: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -72,7 +73,7 @@ def build_list_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -86,6 +87,8 @@ def build_list_request(
_params["$tags"] = _SERIALIZER.query("tags", tags, "str")
if list_view_type is not None:
_params["listViewType"] = _SERIALIZER.query("list_view_type", list_view_type, "str")
+ if stage is not None:
+ _params["stage"] = _SERIALIZER.query("stage", stage, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
@@ -99,7 +102,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -119,7 +122,7 @@ def build_delete_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -136,7 +139,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -156,7 +159,7 @@ def build_get_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -173,7 +176,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -194,7 +197,7 @@ def build_create_or_update_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -237,6 +240,7 @@ def list(
skip: Optional[str] = None,
tags: Optional[str] = None,
list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ stage: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.DataVersionBase"]:
"""List data versions in the data container.
@@ -266,6 +270,8 @@ def list(
ListViewType.All]View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+ :param stage: The data stage to filter the returned versions by. Default value is None.
+ :type stage: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DataVersionBase or the result of cls(response)
:rtype:
@@ -299,6 +305,7 @@ def prepare_request(next_link=None):
skip=skip,
tags=tags,
list_view_type=list_view_type,
+ stage=stage,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
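The new stage parameter threads straight through to the stage query parameter added in build_list_request above. A usage sketch; the stage label "Production" is hypothetical and the identifiers are placeholders:

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-0000-0000-0000-000000000000",  # placeholder
)

# Only versions tagged with the given stage are returned (hypothetical label).
for version in client.data_versions.list(
    resource_group_name="my-rg",
    workspace_name="my-workspace",
    name="my-data-asset",
    stage="Production",
):
    print(version.name)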
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_datastores_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_datastores_operations.py
index 651ac883ae0f..6c36aea5efd2 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_datastores_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_datastores_operations.py
@@ -28,7 +28,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -54,7 +54,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -72,7 +72,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -103,7 +103,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -122,7 +122,7 @@ def build_delete_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -139,7 +139,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -158,7 +158,7 @@ def build_get_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -181,7 +181,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -201,7 +201,7 @@ def build_create_or_update_request(
"name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -222,7 +222,7 @@ def build_list_secrets_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -241,7 +241,7 @@ def build_list_secrets_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_environment_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_environment_containers_operations.py
index d65ee4225ec4..d87d9b92165c 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_environment_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_environment_containers_operations.py
@@ -28,7 +28,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -49,7 +49,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -67,7 +67,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -88,7 +88,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -107,7 +107,7 @@ def build_delete_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -124,7 +124,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -143,7 +143,7 @@ def build_get_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -160,7 +160,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -180,7 +180,7 @@ def build_create_or_update_request(
"name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_environment_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_environment_versions_operations.py
index fc163b0b3bb7..1b5a08843c57 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_environment_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_environment_versions_operations.py
@@ -28,7 +28,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -47,12 +47,13 @@ def build_list_request(
top: Optional[int] = None,
skip: Optional[str] = None,
list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ stage: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -71,7 +72,7 @@ def build_list_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -83,6 +84,8 @@ def build_list_request(
_params["$skip"] = _SERIALIZER.query("skip", skip, "str")
if list_view_type is not None:
_params["listViewType"] = _SERIALIZER.query("list_view_type", list_view_type, "str")
+ if stage is not None:
+ _params["stage"] = _SERIALIZER.query("stage", stage, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
@@ -96,7 +99,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -116,7 +119,7 @@ def build_delete_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -133,7 +136,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -153,7 +156,7 @@ def build_get_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -170,7 +173,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -191,7 +194,7 @@ def build_create_or_update_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -233,6 +236,7 @@ def list(
top: Optional[int] = None,
skip: Optional[str] = None,
list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ stage: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.EnvironmentVersion"]:
"""List versions.
@@ -255,6 +259,9 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+ :param stage: Stage for including/excluding (for example) archived entities. Takes priority
+ over listViewType. Default value is None.
+ :type stage: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EnvironmentVersion or the result of cls(response)
:rtype:
@@ -287,6 +294,7 @@ def prepare_request(next_link=None):
top=top,
skip=skip,
list_view_type=list_view_type,
+ stage=stage,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
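The new stage filter threaded through the environment-versions operations above can be passed straight to the operation-level list call. A hypothetical usage sketch (subscription, resource group, workspace, environment name, and the "Production" stage value are all placeholders; it assumes a client built with azure-identity and left on the default preview API version):

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-0000-0000-0000-000000000000",
)
# stage is a plain string filter; per the docstring it takes priority over list_view_type.
for env_version in client.environment_versions.list(
    resource_group_name="my-rg",
    workspace_name="my-workspace",
    name="my-environment",
    stage="Production",
):
    print(env_version.name)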
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_features_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_features_operations.py
new file mode 100644
index 000000000000..c2851df51c14
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_features_operations.py
@@ -0,0 +1,381 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from typing import Any, Callable, Dict, Iterable, Optional, TypeVar, Union
+import urllib.parse
+
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import HttpResponse
+from azure.core.rest import HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from .. import models as _models
+from .._serialization import Serializer
+from .._vendor import _convert_request
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_list_request(
+ resource_group_name: str,
+ workspace_name: str,
+ featureset_name: str,
+ featureset_version: str,
+ subscription_id: str,
+ *,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ feature_name: Optional[str] = None,
+ description: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 1000,
+ **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{featuresetName}/versions/{featuresetVersion}/features",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "featuresetName": _SERIALIZER.url(
+ "featureset_name", featureset_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"
+ ),
+ "featuresetVersion": _SERIALIZER.url("featureset_version", featureset_version, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+ if skip is not None:
+ _params["$skip"] = _SERIALIZER.query("skip", skip, "str")
+ if tags is not None:
+ _params["tags"] = _SERIALIZER.query("tags", tags, "str")
+ if feature_name is not None:
+ _params["featureName"] = _SERIALIZER.query("feature_name", feature_name, "str")
+ if description is not None:
+ _params["description"] = _SERIALIZER.query("description", description, "str")
+ if list_view_type is not None:
+ _params["listViewType"] = _SERIALIZER.query("list_view_type", list_view_type, "str")
+ if page_size is not None:
+ _params["pageSize"] = _SERIALIZER.query("page_size", page_size, "int")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_request(
+ resource_group_name: str,
+ workspace_name: str,
+ featureset_name: str,
+ featureset_version: str,
+ feature_name: str,
+ subscription_id: str,
+ **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{featuresetName}/versions/{featuresetVersion}/features/{featureName}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "featuresetName": _SERIALIZER.url(
+ "featureset_name", featureset_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"
+ ),
+ "featuresetVersion": _SERIALIZER.url("featureset_version", featureset_version, "str"),
+ "featureName": _SERIALIZER.url(
+ "feature_name", feature_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class FeaturesOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.MachineLearningServicesMgmtClient`'s
+ :attr:`features` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs):
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ featureset_name: str,
+ featureset_version: str,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ feature_name: Optional[str] = None,
+ description: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 1000,
+ **kwargs: Any
+ ) -> Iterable["_models.Feature"]:
+ """List Features.
+
+ List Features.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param featureset_name: Featureset name. This is case-sensitive. Required.
+ :type featureset_name: str
+ :param featureset_version: Featureset Version identifier. This is case-sensitive. Required.
+ :type featureset_version: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :param tags: Comma-separated list of tag names (and optionally values). Example:
+ tag1,tag2=value2. Default value is None.
+ :type tags: str
+ :param feature_name: Feature name. Default value is None.
+ :type feature_name: str
+ :param description: Description of the featureset. Default value is None.
+ :type description: str
+ :param list_view_type: [ListViewType.ActiveOnly, ListViewType.ArchivedOnly,
+ ListViewType.All] View type for including/excluding (for example) archived entities. Known
+ values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
+ :type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+ :param page_size: Page size. Default value is 1000.
+ :type page_size: int
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either Feature or the result of cls(response)
+ :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.Feature]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeatureResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ featureset_name=featureset_name,
+ featureset_version=featureset_version,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ tags=tags,
+ feature_name=feature_name,
+ description=description,
+ list_view_type=list_view_type,
+ page_size=page_size,
+ api_version=api_version,
+ template_url=self.list.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
+
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize("FeatureResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(get_next, extract_data)
+
+ list.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{featuresetName}/versions/{featuresetVersion}/features"
+ }
+
+ @distributed_trace
+ def get(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ featureset_name: str,
+ featureset_version: str,
+ feature_name: str,
+ **kwargs: Any
+ ) -> _models.Feature:
+ """Get feature.
+
+ Get feature.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param featureset_name: Feature set name. This is case-sensitive. Required.
+ :type featureset_name: str
+ :param featureset_version: Feature set version identifier. This is case-sensitive. Required.
+ :type featureset_version: str
+ :param feature_name: Feature Name. This is case-sensitive. Required.
+ :type feature_name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: Feature or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.Feature
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.Feature] = kwargs.pop("cls", None)
+
+ request = build_get_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ featureset_name=featureset_name,
+ featureset_version=featureset_version,
+ feature_name=feature_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.get.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("Feature", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ get.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{featuresetName}/versions/{featuresetVersion}/features/{featureName}"
+ }
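The new FeaturesOperations group added by this file is reachable from the client's features attribute. A hypothetical usage sketch (all resource names are placeholders; it reuses the client constructed in the earlier environment-versions sketch):

# Assumes `client` is the MachineLearningServicesMgmtClient built in the previous sketch.
# List the features of a specific featureset version, then fetch one by name.
for feature in client.features.list(
    resource_group_name="my-rg",
    workspace_name="my-workspace",
    featureset_name="transactions",
    featureset_version="1",
    page_size=100,
):
    print(feature.name)

feature = client.features.get(
    resource_group_name="my-rg",
    workspace_name="my-workspace",
    featureset_name="transactions",
    featureset_version="1",
    feature_name="transaction_amount",
)
print(feature.name)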
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_featureset_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_featureset_containers_operations.py
new file mode 100644
index 000000000000..23e64fa6c217
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_featureset_containers_operations.py
@@ -0,0 +1,814 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import HttpResponse
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.core.rest import HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
+
+from .. import models as _models
+from .._serialization import Serializer
+from .._vendor import _convert_request
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_list_request(
+ resource_group_name: str,
+ workspace_name: str,
+ subscription_id: str,
+ *,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 20,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ created_by: Optional[str] = None,
+ **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+ if skip is not None:
+ _params["$skip"] = _SERIALIZER.query("skip", skip, "str")
+ if tags is not None:
+ _params["tags"] = _SERIALIZER.query("tags", tags, "str")
+ if list_view_type is not None:
+ _params["listViewType"] = _SERIALIZER.query("list_view_type", list_view_type, "str")
+ if page_size is not None:
+ _params["pageSize"] = _SERIALIZER.query("page_size", page_size, "int")
+ if name is not None:
+ _params["name"] = _SERIALIZER.query("name", name, "str")
+ if description is not None:
+ _params["description"] = _SERIALIZER.query("description", description, "str")
+ if created_by is not None:
+ _params["createdBy"] = _SERIALIZER.query("created_by", created_by, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_entity_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_create_or_update_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class FeaturesetContainersOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.MachineLearningServicesMgmtClient`'s
+ :attr:`featureset_containers` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs):
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 20,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ created_by: Optional[str] = None,
+ **kwargs: Any
+ ) -> Iterable["_models.FeaturesetContainer"]:
+ """List featurestore entity containers.
+
+ List featurestore entity containers.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :param tags: Comma-separated list of tag names (and optionally values). Example:
+ tag1,tag2=value2. Default value is None.
+ :type tags: str
+ :param list_view_type: [ListViewType.ActiveOnly, ListViewType.ArchivedOnly,
+ ListViewType.All] View type for including/excluding (for example) archived entities. Known
+ values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
+ :type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+ :param page_size: Page size. Default value is 20.
+ :type page_size: int
+ :param name: Name of the featureset. Default value is None.
+ :type name: str
+ :param description: Description of the featureset. Default value is None.
+ :type description: str
+ :param created_by: Name of the user who created the featureset. Default value is None.
+ :type created_by: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either FeaturesetContainer or the result of cls(response)
+ :rtype:
+ ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.FeaturesetContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturesetContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ tags=tags,
+ list_view_type=list_view_type,
+ page_size=page_size,
+ name=name,
+ description=description,
+ created_by=created_by,
+ api_version=api_version,
+ template_url=self.list.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
+
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize("FeaturesetContainerResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(get_next, extract_data)
+
+ list.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets"
+ }
+
+ def _delete_initial( # pylint: disable=inconsistent-return-statements
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> None:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self._delete_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202, 204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ _delete_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}"
+ }
+
+ @distributed_trace
+ def begin_delete(self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any) -> LROPoller[None]:
+ """Delete container.
+
+ Delete container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._delete_initial( # type: ignore
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_delete.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}"
+ }
+
+ @distributed_trace
+ def get_entity(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> _models.FeaturesetContainer:
+ """Get container.
+
+ Get container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: FeaturesetContainer or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.FeaturesetContainer
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturesetContainer] = kwargs.pop("cls", None)
+
+ request = build_get_entity_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.get_entity.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("FeaturesetContainer", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ get_entity.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}"
+ }
+
+ def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.FeaturesetContainer, IO],
+ **kwargs: Any
+ ) -> _models.FeaturesetContainer:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturesetContainer] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "FeaturesetContainer")
+
+ request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._create_or_update_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("FeaturesetContainer", pipeline_response)
+
+ if response.status_code == 201:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Azure-AsyncOperation"] = self._deserialize(
+ "str", response.headers.get("Azure-AsyncOperation")
+ )
+
+ deserialized = self._deserialize("FeaturesetContainer", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ _create_or_update_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}"
+ }
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: _models.FeaturesetContainer,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturesetContainer]:
+ """Create or update container.
+
+ Create or update container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param body: Container entity to create or update. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturesetContainer
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either FeaturesetContainer or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturesetContainer]:
+ """Create or update container.
+
+ Create or update container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param body: Container entity to create or update. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either FeaturesetContainer or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.FeaturesetContainer, IO],
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturesetContainer]:
+ """Create or update container.
+
+ Create or update container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param body: Container entity to create or update. Is either a FeaturesetContainer type or an
+ IO type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturesetContainer or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either FeaturesetContainer or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturesetContainer] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("FeaturesetContainer", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "original-uri"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_create_or_update.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}"
+ }
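Likewise, the FeaturesetContainersOperations group defined in this file hangs off the client's featureset_containers attribute, with delete and create_or_update exposed as long-running operations. A hypothetical sketch (placeholder names; same client assumption as the earlier sketches):

# Assumes `client` is the MachineLearningServicesMgmtClient built previously.
# Enumerate active featureset containers, then delete one and wait on the poller.
for container in client.featureset_containers.list(
    resource_group_name="my-rg",
    workspace_name="my-workspace",
    list_view_type="ActiveOnly",
):
    print(container.name)

poller = client.featureset_containers.begin_delete(
    resource_group_name="my-rg",
    workspace_name="my-workspace",
    name="transactions",
)
poller.result()  # blocks until the service reports the delete as complete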
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_featureset_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_featureset_versions_operations.py
new file mode 100644
index 000000000000..96716fd615a8
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_featureset_versions_operations.py
@@ -0,0 +1,1162 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import HttpResponse
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.core.rest import HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
+
+from .. import models as _models
+from .._serialization import Serializer
+from .._vendor import _convert_request
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_list_request(
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ subscription_id: str,
+ *,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 20,
+ version_name: Optional[str] = None,
+ version: Optional[str] = None,
+ description: Optional[str] = None,
+ created_by: Optional[str] = None,
+ stage: Optional[str] = None,
+ **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+ if skip is not None:
+ _params["$skip"] = _SERIALIZER.query("skip", skip, "str")
+ if tags is not None:
+ _params["tags"] = _SERIALIZER.query("tags", tags, "str")
+ if list_view_type is not None:
+ _params["listViewType"] = _SERIALIZER.query("list_view_type", list_view_type, "str")
+ if page_size is not None:
+ _params["pageSize"] = _SERIALIZER.query("page_size", page_size, "int")
+ if version_name is not None:
+ _params["versionName"] = _SERIALIZER.query("version_name", version_name, "str")
+ if version is not None:
+ _params["version"] = _SERIALIZER.query("version", version, "str")
+ if description is not None:
+ _params["description"] = _SERIALIZER.query("description", description, "str")
+ if created_by is not None:
+ _params["createdBy"] = _SERIALIZER.query("created_by", created_by, "str")
+ if stage is not None:
+ _params["stage"] = _SERIALIZER.query("stage", stage, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+ resource_group_name: str, workspace_name: str, name: str, version: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ "version": _SERIALIZER.url("version", version, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_request(
+ resource_group_name: str, workspace_name: str, name: str, version: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ "version": _SERIALIZER.url("version", version, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_create_or_update_request(
+ resource_group_name: str, workspace_name: str, name: str, version: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ "version": _SERIALIZER.url("version", version, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_backfill_request(
+ resource_group_name: str, workspace_name: str, name: str, version: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}/backfill",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ "version": _SERIALIZER.url("version", version, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class FeaturesetVersionsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.MachineLearningServicesMgmtClient`'s
+ :attr:`featureset_versions` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs):
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 20,
+ version_name: Optional[str] = None,
+ version: Optional[str] = None,
+ description: Optional[str] = None,
+ created_by: Optional[str] = None,
+ stage: Optional[str] = None,
+ **kwargs: Any
+ ) -> Iterable["_models.FeaturesetVersion"]:
+ """List versions.
+
+ List versions.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Featureset name. This is case-sensitive. Required.
+ :type name: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :param tags: Comma-separated list of tag names (and optionally values). Example:
+ tag1,tag2=value2. Default value is None.
+ :type tags: str
+    :param list_view_type: View type for including or excluding archived entities (one of
+      ListViewType.ActiveOnly, ListViewType.ArchivedOnly, or ListViewType.All). Known values are:
+      "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
+ :type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+    :param page_size: Page size. Default value is 20.
+    :type page_size: int
+    :param version_name: Name for the featureset version. Default value is None.
+    :type version_name: str
+    :param version: Featureset version. Default value is None.
+    :type version: str
+    :param description: Description for the featureset version. Default value is None.
+    :type description: str
+    :param created_by: User who created the featureset version. Default value is None.
+    :type created_by: str
+ :param stage: Specifies the featurestore stage. Default value is None.
+ :type stage: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+    :return: An iterator-like instance of either FeaturesetVersion or the result of cls(response)
+ :rtype:
+ ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.FeaturesetVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturesetVersionResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ tags=tags,
+ list_view_type=list_view_type,
+ page_size=page_size,
+ version_name=version_name,
+ version=version,
+ description=description,
+ created_by=created_by,
+ stage=stage,
+ api_version=api_version,
+ template_url=self.list.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
+
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize("FeaturesetVersionResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(get_next, extract_data)
+
+ list.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions"
+ }
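+
+    # Illustrative usage sketch (not part of the generated operations). The pager returned by
+    # ``list`` is evaluated lazily: iterating it issues the first request and then follows
+    # ``nextLink`` pages transparently. Placeholder values ("<...>") are illustrative, and the
+    # client construction assumes ``azure-identity`` is installed.
+    #
+    #     from azure.identity import DefaultAzureCredential
+    #     from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+    #
+    #     client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")
+    #     for fs_version in client.featureset_versions.list(
+    #         resource_group_name="<resource-group>",
+    #         workspace_name="<workspace>",
+    #         name="<featureset-name>",
+    #     ):
+    #         print(fs_version.id)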
+
+ def _delete_initial( # pylint: disable=inconsistent-return-statements
+ self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
+ ) -> None:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self._delete_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202, 204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ _delete_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}"
+ }
+
+ @distributed_trace
+ def begin_delete(
+ self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
+ ) -> LROPoller[None]:
+ """Delete version.
+
+ Delete version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._delete_initial( # type: ignore
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_delete.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}"
+ }
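+
+    # Illustrative usage sketch (not part of the generated operations): ``begin_delete`` returns
+    # an ``LROPoller[None]``; call ``result()`` (or ``wait()``) to block until the deletion
+    # completes. Placeholder values are illustrative.
+    #
+    #     client.featureset_versions.begin_delete(
+    #         resource_group_name="<resource-group>",
+    #         workspace_name="<workspace>",
+    #         name="<featureset-name>",
+    #         version="1",
+    #     ).result()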
+
+ @distributed_trace
+ def get(
+ self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
+ ) -> _models.FeaturesetVersion:
+ """Get version.
+
+ Get version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: FeaturesetVersion or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.FeaturesetVersion
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturesetVersion] = kwargs.pop("cls", None)
+
+ request = build_get_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.get.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("FeaturesetVersion", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ get.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}"
+ }
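+
+    # Illustrative usage sketch (not part of the generated operations): ``get`` is synchronous and
+    # returns the deserialized ``FeaturesetVersion`` directly. Placeholder values are illustrative.
+    #
+    #     fs_version = client.featureset_versions.get(
+    #         resource_group_name="<resource-group>",
+    #         workspace_name="<workspace>",
+    #         name="<featureset-name>",
+    #         version="1",
+    #     )
+    #     print(fs_version.id)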
+
+ def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.FeaturesetVersion, IO],
+ **kwargs: Any
+ ) -> _models.FeaturesetVersion:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturesetVersion] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "FeaturesetVersion")
+
+ request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._create_or_update_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("FeaturesetVersion", pipeline_response)
+
+ if response.status_code == 201:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Azure-AsyncOperation"] = self._deserialize(
+ "str", response.headers.get("Azure-AsyncOperation")
+ )
+
+ deserialized = self._deserialize("FeaturesetVersion", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ _create_or_update_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}"
+ }
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: _models.FeaturesetVersion,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturesetVersion]:
+ """Create or update version.
+
+ Create or update version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Version entity to create or update. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturesetVersion
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either FeaturesetVersion or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturesetVersion]:
+ """Create or update version.
+
+ Create or update version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Version entity to create or update. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either FeaturesetVersion or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.FeaturesetVersion, IO],
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturesetVersion]:
+ """Create or update version.
+
+ Create or update version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+    :param body: Version entity to create or update. Is either a FeaturesetVersion type or an IO
+      type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturesetVersion or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either FeaturesetVersion or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturesetVersion] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("FeaturesetVersion", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "original-uri"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_create_or_update.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}"
+ }
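+
+    # Illustrative usage sketch (not part of the generated operations): ``body`` may be a
+    # ``FeaturesetVersion`` model or a JSON byte stream (IO); ``result()`` on the returned poller
+    # yields the created or updated ``FeaturesetVersion``. Placeholder values are illustrative and
+    # ``featureset_version_body`` is assumed to be built by the caller.
+    #
+    #     poller = client.featureset_versions.begin_create_or_update(
+    #         resource_group_name="<resource-group>",
+    #         workspace_name="<workspace>",
+    #         name="<featureset-name>",
+    #         version="1",
+    #         body=featureset_version_body,
+    #     )
+    #     created = poller.result()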
+
+ def _backfill_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.FeaturesetVersionBackfillRequest, IO],
+ **kwargs: Any
+ ) -> Optional[_models.FeaturesetVersionBackfillResponse]:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Optional[_models.FeaturesetVersionBackfillResponse]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "FeaturesetVersionBackfillRequest")
+
+ request = build_backfill_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._backfill_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("FeaturesetVersionBackfillResponse", pipeline_response)
+
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+
+ _backfill_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}/backfill"
+ }
+
+ @overload
+ def begin_backfill(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: _models.FeaturesetVersionBackfillRequest,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturesetVersionBackfillResponse]:
+ """Backfill.
+
+ Backfill.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Feature set version backfill request entity. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturesetVersionBackfillRequest
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either FeaturesetVersionBackfillResponse or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetVersionBackfillResponse]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_backfill(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturesetVersionBackfillResponse]:
+ """Backfill.
+
+ Backfill.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Feature set version backfill request entity. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either FeaturesetVersionBackfillResponse or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetVersionBackfillResponse]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_backfill(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.FeaturesetVersionBackfillRequest, IO],
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturesetVersionBackfillResponse]:
+ """Backfill.
+
+ Backfill.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+    :param body: Feature set version backfill request entity. Is either a
+      FeaturesetVersionBackfillRequest type or an IO type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturesetVersionBackfillRequest or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either FeaturesetVersionBackfillResponse or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetVersionBackfillResponse]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturesetVersionBackfillResponse] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._backfill_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("FeaturesetVersionBackfillResponse", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_backfill.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}/backfill"
+ }
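+
+
+# Illustrative usage sketch (not part of the generated operations): it shows how the
+# long-running ``begin_backfill`` above is typically driven through the management client's
+# ``featureset_versions`` attribute. ``body`` is assumed to be a
+# ``FeaturesetVersionBackfillRequest`` built by the caller.
+def _example_begin_backfill(client, resource_group_name, workspace_name, name, version, body):
+    """Start a featureset backfill and wait for the LRO to finish."""
+    poller = client.featureset_versions.begin_backfill(
+        resource_group_name=resource_group_name,
+        workspace_name=workspace_name,
+        name=name,
+        version=version,
+        body=body,
+    )
+    # result() blocks until the operation reaches a terminal state and returns the
+    # deserialized FeaturesetVersionBackfillResponse.
+    return poller.result()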
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_featurestore_entity_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_featurestore_entity_containers_operations.py
new file mode 100644
index 000000000000..e65238c3984e
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_featurestore_entity_containers_operations.py
@@ -0,0 +1,815 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import HttpResponse
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.core.rest import HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
+
+from .. import models as _models
+from .._serialization import Serializer
+from .._vendor import _convert_request
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_list_request(
+ resource_group_name: str,
+ workspace_name: str,
+ subscription_id: str,
+ *,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 20,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ created_by: Optional[str] = None,
+ **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+ if skip is not None:
+ _params["$skip"] = _SERIALIZER.query("skip", skip, "str")
+ if tags is not None:
+ _params["tags"] = _SERIALIZER.query("tags", tags, "str")
+ if list_view_type is not None:
+ _params["listViewType"] = _SERIALIZER.query("list_view_type", list_view_type, "str")
+ if page_size is not None:
+ _params["pageSize"] = _SERIALIZER.query("page_size", page_size, "int")
+ if name is not None:
+ _params["name"] = _SERIALIZER.query("name", name, "str")
+ if description is not None:
+ _params["description"] = _SERIALIZER.query("description", description, "str")
+ if created_by is not None:
+ _params["createdBy"] = _SERIALIZER.query("created_by", created_by, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_entity_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_create_or_update_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class FeaturestoreEntityContainersOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.MachineLearningServicesMgmtClient`'s
+ :attr:`featurestore_entity_containers` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs):
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 20,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ created_by: Optional[str] = None,
+ **kwargs: Any
+ ) -> Iterable["_models.FeaturestoreEntityContainer"]:
+ """List featurestore entity containers.
+
+ List featurestore entity containers.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :param tags: Comma-separated list of tag names (and optionally values). Example:
+ tag1,tag2=value2. Default value is None.
+ :type tags: str
+    :param list_view_type: View type for including or excluding archived entities (one of
+      ListViewType.ActiveOnly, ListViewType.ArchivedOnly, or ListViewType.All). Known values are:
+      "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
+ :type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+    :param page_size: Page size. Default value is 20.
+    :type page_size: int
+    :param name: Name of the featurestore entity. Default value is None.
+    :type name: str
+    :param description: Description of the featurestore entity. Default value is None.
+    :type description: str
+    :param created_by: User who created the featurestore entity. Default value is None.
+    :type created_by: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either FeaturestoreEntityContainer or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturestoreEntityContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ tags=tags,
+ list_view_type=list_view_type,
+ page_size=page_size,
+ name=name,
+ description=description,
+ created_by=created_by,
+ api_version=api_version,
+ template_url=self.list.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
+
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize("FeaturestoreEntityContainerResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(get_next, extract_data)
+
+ list.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities"
+ }
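+
+    # Illustrative usage sketch (editorial note, not generated code): iterating the paged result
+    # of this operation with a hypothetical client, resource group and workspace.
+    #
+    #     from azure.identity import DefaultAzureCredential
+    #     from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+    #
+    #     client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")
+    #     for container in client.featurestore_entity_containers.list(
+    #         resource_group_name="my-rg", workspace_name="my-workspace", list_view_type="ActiveOnly"
+    #     ):
+    #         print(container.name)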
+
+ def _delete_initial( # pylint: disable=inconsistent-return-statements
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> None:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self._delete_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202, 204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ _delete_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}"
+ }
+
+ @distributed_trace
+ def begin_delete(self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any) -> LROPoller[None]:
+ """Delete container.
+
+ Delete container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._delete_initial( # type: ignore
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_delete.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}"
+ }
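+
+    # Illustrative usage sketch (editorial note, not generated code): this is a long-running
+    # operation, so the returned LROPoller is typically blocked on with .result(); the resource
+    # names below are hypothetical.
+    #
+    #     poller = client.featurestore_entity_containers.begin_delete(
+    #         resource_group_name="my-rg", workspace_name="my-workspace", name="my-entity"
+    #     )
+    #     poller.result()  # blocks until the service reports the delete as finished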
+
+ @distributed_trace
+ def get_entity(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> _models.FeaturestoreEntityContainer:
+ """Get container.
+
+ Get container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: FeaturestoreEntityContainer or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturestoreEntityContainer] = kwargs.pop("cls", None)
+
+ request = build_get_entity_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.get_entity.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("FeaturestoreEntityContainer", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ get_entity.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}"
+ }
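+
+    # Illustrative usage sketch (editorial note, not generated code): a synchronous read of a
+    # single container using hypothetical names; the call returns a FeaturestoreEntityContainer
+    # model or raises HttpResponseError (e.g. on 404).
+    #
+    #     container = client.featurestore_entity_containers.get_entity(
+    #         resource_group_name="my-rg", workspace_name="my-workspace", name="my-entity"
+    #     )
+    #     print(container.id, container.name)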
+
+ def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.FeaturestoreEntityContainer, IO],
+ **kwargs: Any
+ ) -> _models.FeaturestoreEntityContainer:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturestoreEntityContainer] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "FeaturestoreEntityContainer")
+
+ request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._create_or_update_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("FeaturestoreEntityContainer", pipeline_response)
+
+ if response.status_code == 201:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Azure-AsyncOperation"] = self._deserialize(
+ "str", response.headers.get("Azure-AsyncOperation")
+ )
+
+ deserialized = self._deserialize("FeaturestoreEntityContainer", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ _create_or_update_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}"
+ }
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: _models.FeaturestoreEntityContainer,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturestoreEntityContainer]:
+ """Create or update container.
+
+ Create or update container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param body: Container entity to create or update. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either FeaturestoreEntityContainer or the result
+ of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturestoreEntityContainer]:
+ """Create or update container.
+
+ Create or update container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param body: Container entity to create or update. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either FeaturestoreEntityContainer or the result
+ of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.FeaturestoreEntityContainer, IO],
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturestoreEntityContainer]:
+ """Create or update container.
+
+ Create or update container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param body: Container entity to create or update. Is either a FeaturestoreEntityContainer type
+         or an IO type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either FeaturestoreEntityContainer or the result
+ of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturestoreEntityContainer] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("FeaturestoreEntityContainer", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "original-uri"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_create_or_update.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}"
+ }
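+
+    # Illustrative usage sketch (editorial note, not generated code): create-or-update is a
+    # long-running operation; `entity_body` stands in for a FeaturestoreEntityContainer model
+    # (or a JSON byte stream) built by the caller, and the resource names are hypothetical.
+    #
+    #     entity_body = ...  # a _models.FeaturestoreEntityContainer built from your definition
+    #     poller = client.featurestore_entity_containers.begin_create_or_update(
+    #         resource_group_name="my-rg",
+    #         workspace_name="my-workspace",
+    #         name="my-entity",
+    #         body=entity_body,
+    #     )
+    #     created = poller.result()  # FeaturestoreEntityContainer returned by the service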
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_featurestore_entity_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_featurestore_entity_versions_operations.py
new file mode 100644
index 000000000000..844aa52e4f70
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_featurestore_entity_versions_operations.py
@@ -0,0 +1,859 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import HttpResponse
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.core.rest import HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
+
+from .. import models as _models
+from .._serialization import Serializer
+from .._vendor import _convert_request
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_list_request(
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ subscription_id: str,
+ *,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 20,
+ version_name: Optional[str] = None,
+ version: Optional[str] = None,
+ description: Optional[str] = None,
+ created_by: Optional[str] = None,
+ stage: Optional[str] = None,
+ **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}/versions",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+ if skip is not None:
+ _params["$skip"] = _SERIALIZER.query("skip", skip, "str")
+ if tags is not None:
+ _params["tags"] = _SERIALIZER.query("tags", tags, "str")
+ if list_view_type is not None:
+ _params["listViewType"] = _SERIALIZER.query("list_view_type", list_view_type, "str")
+ if page_size is not None:
+ _params["pageSize"] = _SERIALIZER.query("page_size", page_size, "int")
+ if version_name is not None:
+ _params["versionName"] = _SERIALIZER.query("version_name", version_name, "str")
+ if version is not None:
+ _params["version"] = _SERIALIZER.query("version", version, "str")
+ if description is not None:
+ _params["description"] = _SERIALIZER.query("description", description, "str")
+ if created_by is not None:
+ _params["createdBy"] = _SERIALIZER.query("created_by", created_by, "str")
+ if stage is not None:
+ _params["stage"] = _SERIALIZER.query("stage", stage, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+ resource_group_name: str, workspace_name: str, name: str, version: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}/versions/{version}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ "version": _SERIALIZER.url("version", version, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_request(
+ resource_group_name: str, workspace_name: str, name: str, version: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}/versions/{version}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ "version": _SERIALIZER.url("version", version, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_create_or_update_request(
+ resource_group_name: str, workspace_name: str, name: str, version: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}/versions/{version}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ "version": _SERIALIZER.url("version", version, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class FeaturestoreEntityVersionsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.MachineLearningServicesMgmtClient`'s
+ :attr:`featurestore_entity_versions` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs):
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 20,
+ version_name: Optional[str] = None,
+ version: Optional[str] = None,
+ description: Optional[str] = None,
+ created_by: Optional[str] = None,
+ stage: Optional[str] = None,
+ **kwargs: Any
+ ) -> Iterable["_models.FeaturestoreEntityVersion"]:
+ """List versions.
+
+ List versions.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Feature entity name. This is case-sensitive. Required.
+ :type name: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :param tags: Comma-separated list of tag names (and optionally values). Example:
+ tag1,tag2=value2. Default value is None.
+ :type tags: str
+ :param list_view_type: [ListViewType.ActiveOnly, ListViewType.ArchivedOnly,
+         ListViewType.All] View type for including/excluding (for example) archived entities. Known
+ values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
+ :type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+        :param page_size: Page size. Default value is 20.
+        :type page_size: int
+        :param version_name: Name of the featurestore entity version. Default value is None.
+        :type version_name: str
+        :param version: Featurestore entity version. Default value is None.
+        :type version: str
+        :param description: Description of the featurestore entity version. Default value is None.
+        :type description: str
+        :param created_by: The createdBy user name. Default value is None.
+        :type created_by: str
+ :param stage: Specifies the featurestore stage. Default value is None.
+ :type stage: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either FeaturestoreEntityVersion or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturestoreEntityVersionResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ tags=tags,
+ list_view_type=list_view_type,
+ page_size=page_size,
+ version_name=version_name,
+ version=version,
+ description=description,
+ created_by=created_by,
+ stage=stage,
+ api_version=api_version,
+ template_url=self.list.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
+
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize("FeaturestoreEntityVersionResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(get_next, extract_data)
+
+ list.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}/versions"
+ }
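+
+    # Illustrative usage sketch (editorial note, not generated code): listing the versions of one
+    # entity, optionally narrowed by the query parameters exposed above; all names and the stage
+    # value are hypothetical.
+    #
+    #     for entity_version in client.featurestore_entity_versions.list(
+    #         resource_group_name="my-rg",
+    #         workspace_name="my-workspace",
+    #         name="my-entity",
+    #         stage="Production",
+    #     ):
+    #         print(entity_version.name)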
+
+ def _delete_initial( # pylint: disable=inconsistent-return-statements
+ self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
+ ) -> None:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self._delete_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202, 204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ _delete_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}/versions/{version}"
+ }
+
+ @distributed_trace
+ def begin_delete(
+ self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
+ ) -> LROPoller[None]:
+ """Delete version.
+
+ Delete version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._delete_initial( # type: ignore
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_delete.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}/versions/{version}"
+ }
+
+ @distributed_trace
+ def get(
+ self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
+ ) -> _models.FeaturestoreEntityVersion:
+ """Get version.
+
+ Get version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: FeaturestoreEntityVersion or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturestoreEntityVersion] = kwargs.pop("cls", None)
+
+ request = build_get_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.get.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("FeaturestoreEntityVersion", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ get.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}/versions/{version}"
+ }
+
+ def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.FeaturestoreEntityVersion, IO],
+ **kwargs: Any
+ ) -> _models.FeaturestoreEntityVersion:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturestoreEntityVersion] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "FeaturestoreEntityVersion")
+
+ request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._create_or_update_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("FeaturestoreEntityVersion", pipeline_response)
+
+ if response.status_code == 201:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Azure-AsyncOperation"] = self._deserialize(
+ "str", response.headers.get("Azure-AsyncOperation")
+ )
+
+ deserialized = self._deserialize("FeaturestoreEntityVersion", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ _create_or_update_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}/versions/{version}"
+ }
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: _models.FeaturestoreEntityVersion,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturestoreEntityVersion]:
+ """Create or update version.
+
+ Create or update version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Version entity to create or update. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either FeaturestoreEntityVersion or the result
+ of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturestoreEntityVersion]:
+ """Create or update version.
+
+ Create or update version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Version entity to create or update. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either FeaturestoreEntityVersion or the result
+ of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.FeaturestoreEntityVersion, IO],
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturestoreEntityVersion]:
+ """Create or update version.
+
+ Create or update version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Version entity to create or update. Is either a FeaturestoreEntityVersion type or
+         an IO type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either FeaturestoreEntityVersion or the result
+ of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturestoreEntityVersion] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("FeaturestoreEntityVersion", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "original-uri"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_create_or_update.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}/versions/{version}"
+ }
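+
+    # Illustrative usage sketch (editorial note, not generated code): publishing a specific
+    # version and reading it back; `version_body` stands in for a FeaturestoreEntityVersion
+    # model built by the caller, and the resource names are hypothetical.
+    #
+    #     version_body = ...  # a _models.FeaturestoreEntityVersion built from your definition
+    #     poller = client.featurestore_entity_versions.begin_create_or_update(
+    #         resource_group_name="my-rg",
+    #         workspace_name="my-workspace",
+    #         name="my-entity",
+    #         version="1",
+    #         body=version_body,
+    #     )
+    #     created_version = poller.result()
+    #     fetched = client.featurestore_entity_versions.get(
+    #         resource_group_name="my-rg", workspace_name="my-workspace", name="my-entity", version="1"
+    #     )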
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_jobs_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_jobs_operations.py
index 83dc12a1ff03..94b6f11b678c 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_jobs_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_jobs_operations.py
@@ -30,7 +30,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -48,12 +48,16 @@ def build_list_request(
job_type: Optional[str] = None,
tag: Optional[str] = None,
list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ asset_name: Optional[str] = None,
+ scheduled: Optional[bool] = None,
+ schedule_id: Optional[str] = None,
+ properties: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -71,7 +75,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -83,6 +87,14 @@ def build_list_request(
_params["tag"] = _SERIALIZER.query("tag", tag, "str")
if list_view_type is not None:
_params["listViewType"] = _SERIALIZER.query("list_view_type", list_view_type, "str")
+ if asset_name is not None:
+ _params["assetName"] = _SERIALIZER.query("asset_name", asset_name, "str")
+ if scheduled is not None:
+ _params["scheduled"] = _SERIALIZER.query("scheduled", scheduled, "bool")
+ if schedule_id is not None:
+ _params["scheduleId"] = _SERIALIZER.query("schedule_id", schedule_id, "str")
+ if properties is not None:
+ _params["properties"] = _SERIALIZER.query("properties", properties, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
@@ -96,7 +108,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -115,7 +127,7 @@ def build_delete_request(
"id": _SERIALIZER.url("id", id, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -132,7 +144,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -151,7 +163,7 @@ def build_get_request(
"id": _SERIALIZER.url("id", id, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -162,13 +174,52 @@ def build_get_request(
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+def build_update_request(
+ resource_group_name: str, workspace_name: str, id: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/jobs/{id}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "id": _SERIALIZER.url("id", id, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
+
+
def build_create_or_update_request(
resource_group_name: str, workspace_name: str, id: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -188,7 +239,7 @@ def build_create_or_update_request(
"id": _SERIALIZER.url("id", id, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -207,7 +258,7 @@ def build_cancel_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -226,7 +277,7 @@ def build_cancel_request(
"id": _SERIALIZER.url("id", id, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -265,6 +316,10 @@ def list(
job_type: Optional[str] = None,
tag: Optional[str] = None,
list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ asset_name: Optional[str] = None,
+ scheduled: Optional[bool] = None,
+ schedule_id: Optional[str] = None,
+ properties: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.JobBase"]:
"""Lists Jobs in the workspace.
@@ -285,6 +340,15 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+ :param asset_name: Asset name the job's named output is registered with. Default value is None.
+ :type asset_name: str
+ :param scheduled: Indicator of whether the job is a scheduled job. Default value is None.
+ :type scheduled: bool
+ :param schedule_id: The schedule ID whose triggered jobs should be listed. Default value is None.
+ :type schedule_id: str
+ :param properties: Comma-separated list of property names (and optionally values). Example:
+ prop1,prop2=value2. Default value is None.
+ :type properties: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either JobBase or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.JobBase]
@@ -315,6 +379,10 @@ def prepare_request(next_link=None):
job_type=job_type,
tag=tag,
list_view_type=list_view_type,
+ asset_name=asset_name,
+ scheduled=scheduled,
+ schedule_id=schedule_id,
+ properties=properties,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
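A minimal usage sketch of the new `list` filters added in this hunk, assuming the client constructed in the sketch above; the filter values are placeholders.

    # List only scheduled jobs that carry a given property, using the new query parameters.
    for job in client.jobs.list(
        resource_group_name="<resource-group>",
        workspace_name="<workspace>",
        scheduled=True,
        schedule_id="<schedule-id>",
        properties="team=ml",  # comma-separated prop[=value] list, as documented above
    ):
        print(job.name)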
@@ -566,6 +634,165 @@ def get(self, resource_group_name: str, workspace_name: str, id: str, **kwargs:
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/jobs/{id}"
}
+ @overload
+ def update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ id: str,
+ body: _models.PartialJobBasePartialResource,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.JobBase:
+ """Updates a Job.
+
+ Updates a Job.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param id: The name and identifier for the Job. This is case-sensitive. Required.
+ :type id: str
+ :param body: Job definition to apply during the operation. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.PartialJobBasePartialResource
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: JobBase or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.JobBase
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ id: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.JobBase:
+ """Updates a Job.
+
+ Updates a Job.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param id: The name and identifier for the Job. This is case-sensitive. Required.
+ :type id: str
+ :param body: Job definition to apply during the operation. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: JobBase or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.JobBase
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ id: str,
+ body: Union[_models.PartialJobBasePartialResource, IO],
+ **kwargs: Any
+ ) -> _models.JobBase:
+ """Updates a Job.
+
+ Updates a Job.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param id: The name and identifier for the Job. This is case-sensitive. Required.
+ :type id: str
+ :param body: Job definition to apply during the operation. Is either a
+ PartialJobBasePartialResource type or an IO type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.PartialJobBasePartialResource or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: JobBase or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.JobBase
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.JobBase] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "PartialJobBasePartialResource")
+
+ request = build_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ id=id,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self.update.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("JobBase", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ update.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/jobs/{id}"
+ }
+
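A minimal usage sketch of the new PATCH `update` operation above, assuming the client constructed earlier; the byte payload exercises the IO branch of the body handling and is a placeholder, not a known-good patch document.

    patched = client.jobs.update(
        resource_group_name="<resource-group>",
        workspace_name="<workspace>",
        id="<job-name>",
        body=b"{}",  # placeholder; a models.PartialJobBasePartialResource instance is the typed alternative
        content_type="application/json",
    )
    print(patched.id)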
@overload
def create_or_update(
self,
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_labeling_jobs_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_labeling_jobs_operations.py
new file mode 100644
index 000000000000..807fc976f9a4
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_labeling_jobs_operations.py
@@ -0,0 +1,1280 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import HttpResponse
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.core.rest import HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
+
+from .. import models as _models
+from .._serialization import Serializer
+from .._vendor import _convert_request
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_list_request(
+ resource_group_name: str,
+ workspace_name: str,
+ subscription_id: str,
+ *,
+ skip: Optional[str] = None,
+ top: Optional[int] = None,
+ **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+ if skip is not None:
+ _params["$skip"] = _SERIALIZER.query("skip", skip, "str")
+ if top is not None:
+ _params["$top"] = _SERIALIZER.query("top", top, "int")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+ resource_group_name: str, workspace_name: str, id: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "id": _SERIALIZER.url("id", id, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_request(
+ resource_group_name: str, workspace_name: str, id: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "id": _SERIALIZER.url("id", id, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_create_or_update_request(
+ resource_group_name: str, workspace_name: str, id: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "id": _SERIALIZER.url("id", id, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_export_labels_request(
+ resource_group_name: str, workspace_name: str, id: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}/exportLabels",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "id": _SERIALIZER.url("id", id, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_pause_request(
+ resource_group_name: str, workspace_name: str, id: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}/pause",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "id": _SERIALIZER.url("id", id, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_resume_request(
+ resource_group_name: str, workspace_name: str, id: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}/resume",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "id": _SERIALIZER.url("id", id, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class LabelingJobsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.MachineLearningServicesMgmtClient`'s
+ :attr:`labeling_jobs` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs):
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ skip: Optional[str] = None,
+ top: Optional[int] = None,
+ **kwargs: Any
+ ) -> Iterable["_models.LabelingJob"]:
+ """Lists labeling jobs in the workspace.
+
+ Lists labeling jobs in the workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :param top: Number of labeling jobs to return. Default value is None.
+ :type top: int
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either LabelingJob or the result of cls(response)
+ :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.LabelingJob]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.LabelingJobResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ top=top,
+ api_version=api_version,
+ template_url=self.list.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
+
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize("LabelingJobResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(get_next, extract_data)
+
+ list.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs"
+ }
+
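A minimal usage sketch of the paged `list` operation above, assuming the client constructed earlier; the pager follows the service's nextLink automatically.

    for labeling_job in client.labeling_jobs.list(
        resource_group_name="<resource-group>",
        workspace_name="<workspace>",
        top=10,  # number of labeling jobs to return, per the docstring above
    ):
        print(labeling_job.name)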
+ @distributed_trace
+ def delete( # pylint: disable=inconsistent-return-statements
+ self, resource_group_name: str, workspace_name: str, id: str, **kwargs: Any
+ ) -> None:
+ """Delete a labeling job.
+
+ Delete a labeling job.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param id: The name and identifier for the LabelingJob. Required.
+ :type id: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ id=id,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.delete.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ delete.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}"
+ }
+
+ @distributed_trace
+ def get(self, resource_group_name: str, workspace_name: str, id: str, **kwargs: Any) -> _models.LabelingJob:
+ """Gets a labeling job by name/id.
+
+ Gets a labeling job by name/id.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param id: The name and identifier for the LabelingJob. Required.
+ :type id: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: LabelingJob or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.LabelingJob
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.LabelingJob] = kwargs.pop("cls", None)
+
+ request = build_get_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ id=id,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.get.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("LabelingJob", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ get.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}"
+ }
+
+ def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ id: str,
+ body: Union[_models.LabelingJob, IO],
+ **kwargs: Any
+ ) -> _models.LabelingJob:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.LabelingJob] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "LabelingJob")
+
+ request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ id=id,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._create_or_update_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("LabelingJob", pipeline_response)
+
+ if response.status_code == 201:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Azure-AsyncOperation"] = self._deserialize(
+ "str", response.headers.get("Azure-AsyncOperation")
+ )
+
+ deserialized = self._deserialize("LabelingJob", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ _create_or_update_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}"
+ }
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ id: str,
+ body: _models.LabelingJob,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.LabelingJob]:
+ """Creates or updates a labeling job (asynchronous).
+
+ Creates or updates a labeling job (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param id: The name and identifier for the LabelingJob. Required.
+ :type id: str
+ :param body: LabelingJob definition object. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.LabelingJob
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either LabelingJob or the result of
+ cls(response)
+ :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.LabelingJob]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ id: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.LabelingJob]:
+ """Creates or updates a labeling job (asynchronous).
+
+ Creates or updates a labeling job (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param id: The name and identifier for the LabelingJob. Required.
+ :type id: str
+ :param body: LabelingJob definition object. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either LabelingJob or the result of
+ cls(response)
+ :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.LabelingJob]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ id: str,
+ body: Union[_models.LabelingJob, IO],
+ **kwargs: Any
+ ) -> LROPoller[_models.LabelingJob]:
+ """Creates or updates a labeling job (asynchronous).
+
+ Creates or updates a labeling job (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param id: The name and identifier for the LabelingJob. Required.
+ :type id: str
+ :param body: LabelingJob definition object. Is either a LabelingJob type or an IO type.
+ Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.LabelingJob or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either LabelingJob or the result of
+ cls(response)
+ :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.LabelingJob]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.LabelingJob] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ id=id,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("LabelingJob", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "original-uri"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_create_or_update.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}"
+ }
+
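A minimal usage sketch of the labeling-job create-or-update LRO above, assuming the client constructed earlier; the byte payload is a placeholder (a models.LabelingJob instance is the typed alternative).

    poller = client.labeling_jobs.begin_create_or_update(
        resource_group_name="<resource-group>",
        workspace_name="<workspace>",
        id="<labeling-job-name>",
        body=b"{}",  # placeholder payload only
        content_type="application/json",
    )
    labeling_job = poller.result()  # resolves once ARM reports the operation complete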
+ def _export_labels_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ id: str,
+ body: Union[_models.ExportSummary, IO],
+ **kwargs: Any
+ ) -> Optional[_models.ExportSummary]:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Optional[_models.ExportSummary]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "ExportSummary")
+
+ request = build_export_labels_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ id=id,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._export_labels_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("ExportSummary", pipeline_response)
+
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+
+ _export_labels_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}/exportLabels"
+ }
+
+ @overload
+ def begin_export_labels(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ id: str,
+ body: _models.ExportSummary,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.ExportSummary]:
+ """Export labels from a labeling job (asynchronous). Using the URL in the Location header, the
+ status of the job export operation can be tracked.
+
+ Export labels from a labeling job (asynchronous). Using the URL in the Location header, the
+ status of the job export operation can be tracked.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param id: The name and identifier for the LabelingJob. Required.
+ :type id: str
+ :param body: The export summary. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ExportSummary
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either ExportSummary or the result of
+ cls(response)
+ :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.ExportSummary]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_export_labels(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ id: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.ExportSummary]:
+ """Export labels from a labeling job (asynchronous). Using the URL in the Location header, the
+ status of the job export operation can be tracked.
+
+ Export labels from a labeling job (asynchronous). Using the URL in the Location header, the
+ status of the job export operation can be tracked.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param id: The name and identifier for the LabelingJob. Required.
+ :type id: str
+ :param body: The export summary. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either ExportSummary or the result of
+ cls(response)
+ :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.ExportSummary]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_export_labels(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ id: str,
+ body: Union[_models.ExportSummary, IO],
+ **kwargs: Any
+ ) -> LROPoller[_models.ExportSummary]:
+ """Export labels from a labeling job (asynchronous). Using the URL in the Location header, the
+ status of the job export operation can be tracked.
+
+ Export labels from a labeling job (asynchronous). Using the URL in the Location header, the
+ status of the job export operation can be tracked.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param id: The name and identifier for the LabelingJob. Required.
+ :type id: str
+ :param body: The export summary. Is either an ExportSummary type or an IO type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ExportSummary or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either ExportSummary or the result of
+ cls(response)
+ :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.ExportSummary]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.ExportSummary] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._export_labels_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ id=id,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("ExportSummary", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_export_labels.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}/exportLabels"
+ }
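A minimal usage sketch for this long-running export. Assumptions: the operation group above is exposed on the client as `labeling_jobs`, and a concrete `ExportSummary` subtype such as `CocoExportSummary` is available in `models` for this api-version; all placeholder values are illustrative only.

from azure.identity import DefaultAzureCredential  # requires the azure-identity package
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
from azure.mgmt.machinelearningservices import models

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="<subscription-id>",
)

# `labeling_jobs` and CocoExportSummary are assumptions; any concrete ExportSummary
# subtype generated for this api-version (or a file-like object) can be passed as `body`.
poller = client.labeling_jobs.begin_export_labels(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
    id="<labeling-job-name>",
    body=models.CocoExportSummary(),
)
export_summary = poller.result()  # blocks until the LRO reaches a terminal state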
+
+ @distributed_trace
+ def pause(
+ self, resource_group_name: str, workspace_name: str, id: str, **kwargs: Any
+ ) -> _models.LabelingJobProperties:
+ """Pause a labeling job.
+
+ Pause a labeling job.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param id: The name and identifier for the LabelingJob. Required.
+ :type id: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: LabelingJobProperties or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.LabelingJobProperties
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.LabelingJobProperties] = kwargs.pop("cls", None)
+
+ request = build_pause_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ id=id,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.pause.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("LabelingJobProperties", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ pause.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}/pause"
+ }
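Continuing the sketch above, pausing a labeling job is a plain synchronous call that returns the updated properties:

# Reuses `client` from the sketch above; the operation-group name remains an assumption.
props = client.labeling_jobs.pause(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
    id="<labeling-job-name>",
)
print(props.status)  # assumes LabelingJobProperties inherits the usual job `status` field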
+
+ def _resume_initial(
+ self, resource_group_name: str, workspace_name: str, id: str, **kwargs: Any
+ ) -> Optional[_models.LabelingJobProperties]:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[Optional[_models.LabelingJobProperties]] = kwargs.pop("cls", None)
+
+ request = build_resume_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ id=id,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self._resume_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("LabelingJobProperties", pipeline_response)
+
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+
+ _resume_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}/resume"
+ }
+
+ @distributed_trace
+ def begin_resume(
+ self, resource_group_name: str, workspace_name: str, id: str, **kwargs: Any
+ ) -> LROPoller[_models.LabelingJobProperties]:
+ """Resume a labeling job (asynchronous).
+
+ Resume a labeling job (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param id: The name and identifier for the LabelingJob. Required.
+ :type id: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either LabelingJobProperties or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.LabelingJobProperties]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.LabelingJobProperties] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._resume_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ id=id,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("LabelingJobProperties", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_resume.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/labelingJobs/{id}/resume"
+ }
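Resuming is the long-running counterpart; a sketch under the same assumptions:

resume_poller = client.labeling_jobs.begin_resume(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
    id="<labeling-job-name>",
)
resumed_properties = resume_poller.result()  # LabelingJobProperties once polling completes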
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_managed_network_provisions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_managed_network_provisions_operations.py
new file mode 100644
index 000000000000..1b3f51a36595
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_managed_network_provisions_operations.py
@@ -0,0 +1,340 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import HttpResponse
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.core.rest import HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
+
+from .. import models as _models
+from .._serialization import Serializer
+from .._vendor import _convert_request
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_provision_managed_network_request(
+ resource_group_name: str, workspace_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/provisionManagedNetwork",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class ManagedNetworkProvisionsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.MachineLearningServicesMgmtClient`'s
+ :attr:`managed_network_provisions` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs):
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ def _provision_managed_network_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ body: Optional[Union[_models.ManagedNetworkProvisionOptions, IO]] = None,
+ **kwargs: Any
+ ) -> Optional[_models.ManagedNetworkProvisionStatus]:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Optional[_models.ManagedNetworkProvisionStatus]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ if body is not None:
+ _json = self._serialize.body(body, "ManagedNetworkProvisionOptions")
+ else:
+ _json = None
+
+ request = build_provision_managed_network_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._provision_managed_network_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("ManagedNetworkProvisionStatus", pipeline_response)
+
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+
+ _provision_managed_network_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/provisionManagedNetwork"
+ }
+
+ @overload
+ def begin_provision_managed_network(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ body: Optional[_models.ManagedNetworkProvisionOptions] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.ManagedNetworkProvisionStatus]:
+ """Provisions the managed network of a machine learning workspace.
+
+ Provisions the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param body: Managed Network Provisioning Options for a machine learning workspace. Default
+ value is None.
+ :type body: ~azure.mgmt.machinelearningservices.models.ManagedNetworkProvisionOptions
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either ManagedNetworkProvisionStatus or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.ManagedNetworkProvisionStatus]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_provision_managed_network(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ body: Optional[IO] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.ManagedNetworkProvisionStatus]:
+ """Provisions the managed network of a machine learning workspace.
+
+ Provisions the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param body: Managed Network Provisioning Options for a machine learning workspace. Default
+ value is None.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either ManagedNetworkProvisionStatus or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.ManagedNetworkProvisionStatus]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_provision_managed_network(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ body: Optional[Union[_models.ManagedNetworkProvisionOptions, IO]] = None,
+ **kwargs: Any
+ ) -> LROPoller[_models.ManagedNetworkProvisionStatus]:
+ """Provisions the managed network of a machine learning workspace.
+
+ Provisions the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+        :param body: Managed Network Provisioning Options for a machine learning workspace. Is either a
+         ManagedNetworkProvisionOptions type or an IO type. Default value is None.
+ :type body: ~azure.mgmt.machinelearningservices.models.ManagedNetworkProvisionOptions or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either ManagedNetworkProvisionStatus or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.ManagedNetworkProvisionStatus]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.ManagedNetworkProvisionStatus] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._provision_managed_network_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("ManagedNetworkProvisionStatus", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_provision_managed_network.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/provisionManagedNetwork"
+ }
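A hedged sketch of the provisioning call; `managed_network_provisions` comes from the class docstring above, while the bare ManagedNetworkProvisionOptions() construction is an assumption about its constructor (the body can also be omitted, since it defaults to None):

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient, models

client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")

provision_poller = client.managed_network_provisions.begin_provision_managed_network(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
    body=models.ManagedNetworkProvisionOptions(),  # optional; omitting it sends no body
)
status = provision_poller.result()  # ManagedNetworkProvisionStatus on success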
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_managed_network_settings_rule_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_managed_network_settings_rule_operations.py
new file mode 100644
index 000000000000..2b06f7b95f9f
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_managed_network_settings_rule_operations.py
@@ -0,0 +1,752 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import HttpResponse
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.core.rest import HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
+
+from .. import models as _models
+from .._serialization import Serializer
+from .._vendor import _convert_request
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_list_request(
+ resource_group_name: str, workspace_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundRules",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+ resource_group_name: str, workspace_name: str, rule_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundRules/{ruleName}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "ruleName": _SERIALIZER.url("rule_name", rule_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_request(
+ resource_group_name: str, workspace_name: str, rule_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundRules/{ruleName}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "ruleName": _SERIALIZER.url("rule_name", rule_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_create_or_update_request(
+ resource_group_name: str, workspace_name: str, rule_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundRules/{ruleName}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "ruleName": _SERIALIZER.url("rule_name", rule_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class ManagedNetworkSettingsRuleOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.MachineLearningServicesMgmtClient`'s
+ :attr:`managed_network_settings_rule` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs):
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self, resource_group_name: str, workspace_name: str, **kwargs: Any
+ ) -> Iterable["_models.OutboundRuleBasicResource"]:
+ """Lists the managed network outbound rules for a machine learning workspace.
+
+ Lists the managed network outbound rules for a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: An iterator-like instance of either OutboundRuleBasicResource or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.OutboundRuleListResult] = kwargs.pop("cls", None)
+
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.list.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
+
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize("OutboundRuleListResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(get_next, extract_data)
+
+ list.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundRules"
+ }
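A short sketch of listing outbound rules through the `managed_network_settings_rule` attribute named in the class docstring (client constructed as in the earlier sketches):

for rule in client.managed_network_settings_rule.list(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
):
    # Each item is an OutboundRuleBasicResource; paging across next links is handled by ItemPaged.
    print(rule.name)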
+
+ def _delete_initial( # pylint: disable=inconsistent-return-statements
+ self, resource_group_name: str, workspace_name: str, rule_name: str, **kwargs: Any
+ ) -> None:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ rule_name=rule_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self._delete_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202, 204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ _delete_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundRules/{ruleName}"
+ }
+
+ @distributed_trace
+ def begin_delete(
+ self, resource_group_name: str, workspace_name: str, rule_name: str, **kwargs: Any
+ ) -> LROPoller[None]:
+ """Deletes an outbound rule from the managed network of a machine learning workspace.
+
+ Deletes an outbound rule from the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param rule_name: Name of the workspace managed network outbound rule. Required.
+ :type rule_name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._delete_initial( # type: ignore
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ rule_name=rule_name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_delete.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundRules/{ruleName}"
+ }
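Deleting a rule follows the same LRO pattern; the poller's result is None (sketch):

client.managed_network_settings_rule.begin_delete(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
    rule_name="<rule-name>",
).result()  # returns None once the rule has been removed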
+
+ @distributed_trace
+ def get(
+ self, resource_group_name: str, workspace_name: str, rule_name: str, **kwargs: Any
+ ) -> _models.OutboundRuleBasicResource:
+ """Gets an outbound rule from the managed network of a machine learning workspace.
+
+ Gets an outbound rule from the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param rule_name: Name of the workspace managed network outbound rule. Required.
+ :type rule_name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: OutboundRuleBasicResource or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.OutboundRuleBasicResource] = kwargs.pop("cls", None)
+
+ request = build_get_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ rule_name=rule_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.get.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("OutboundRuleBasicResource", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ get.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundRules/{ruleName}"
+ }
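Fetching a single rule is a plain call (same placeholders as above):

# Returns an OutboundRuleBasicResource; the rule payload itself sits under `properties`.
rule = client.managed_network_settings_rule.get(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
    rule_name="<rule-name>",
)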
+
+ def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ rule_name: str,
+ body: Union[_models.OutboundRuleBasicResource, IO],
+ **kwargs: Any
+ ) -> Optional[_models.OutboundRuleBasicResource]:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Optional[_models.OutboundRuleBasicResource]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "OutboundRuleBasicResource")
+
+ request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ rule_name=rule_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._create_or_update_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("OutboundRuleBasicResource", pipeline_response)
+
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+
+ _create_or_update_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundRules/{ruleName}"
+ }
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ rule_name: str,
+ body: _models.OutboundRuleBasicResource,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.OutboundRuleBasicResource]:
+ """Creates or updates an outbound rule in the managed network of a machine learning workspace.
+
+ Creates or updates an outbound rule in the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param rule_name: Name of the workspace managed network outbound rule. Required.
+ :type rule_name: str
+ :param body: Outbound Rule to be created or updated in the managed network of a machine
+ learning workspace. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either OutboundRuleBasicResource or the result
+ of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ rule_name: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.OutboundRuleBasicResource]:
+ """Creates or updates an outbound rule in the managed network of a machine learning workspace.
+
+ Creates or updates an outbound rule in the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param rule_name: Name of the workspace managed network outbound rule. Required.
+ :type rule_name: str
+ :param body: Outbound Rule to be created or updated in the managed network of a machine
+ learning workspace. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either OutboundRuleBasicResource or the result
+ of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ rule_name: str,
+ body: Union[_models.OutboundRuleBasicResource, IO],
+ **kwargs: Any
+ ) -> LROPoller[_models.OutboundRuleBasicResource]:
+ """Creates or updates an outbound rule in the managed network of a machine learning workspace.
+
+ Creates or updates an outbound rule in the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param rule_name: Name of the workspace managed network outbound rule. Required.
+ :type rule_name: str
+        :param body: Outbound Rule to be created or updated in the managed network of a machine
+         learning workspace. Is either an OutboundRuleBasicResource type or an IO type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either OutboundRuleBasicResource or the result
+ of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.OutboundRuleBasicResource] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ rule_name=rule_name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("OutboundRuleBasicResource", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_create_or_update.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundRules/{ruleName}"
+ }
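A hedged create-or-update sketch; FqdnOutboundRule and its `destination` field are assumptions about the concrete OutboundRule subtypes generated for this api-version, wrapped in the OutboundRuleBasicResource envelope this operation expects:

rule_poller = client.managed_network_settings_rule.begin_create_or_update(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
    rule_name="allow-pypi",
    body=models.OutboundRuleBasicResource(
        properties=models.FqdnOutboundRule(destination="pypi.org"),  # assumed subtype/field names
    ),
)
created_rule = rule_poller.result()  # OutboundRuleBasicResource for the created/updated rule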
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_model_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_model_containers_operations.py
index bdc20a437ab9..2c6636fb7a3a 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_model_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_model_containers_operations.py
@@ -28,7 +28,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -50,7 +50,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -68,7 +68,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -91,7 +91,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -110,7 +110,7 @@ def build_delete_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -127,7 +127,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -146,7 +146,7 @@ def build_get_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -163,7 +163,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -183,7 +183,7 @@ def build_create_or_update_request(
"name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_model_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_model_versions_operations.py
index 94082df98347..6cfe3217a9e8 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_model_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_model_versions_operations.py
@@ -7,7 +7,7 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
+from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -21,14 +21,16 @@
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -53,12 +55,13 @@ def build_list_request(
properties: Optional[str] = None,
feed: Optional[str] = None,
list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ stage: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -77,7 +80,7 @@ def build_list_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -101,6 +104,8 @@ def build_list_request(
_params["feed"] = _SERIALIZER.query("feed", feed, "str")
if list_view_type is not None:
_params["listViewType"] = _SERIALIZER.query("list_view_type", list_view_type, "str")
+ if stage is not None:
+ _params["stage"] = _SERIALIZER.query("stage", stage, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
@@ -114,7 +119,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -134,7 +139,7 @@ def build_delete_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -151,7 +156,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -171,7 +176,7 @@ def build_get_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -188,7 +193,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -209,7 +214,7 @@ def build_create_or_update_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -222,6 +227,46 @@ def build_create_or_update_request(
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+def build_package_request(
+ resource_group_name: str, workspace_name: str, name: str, version: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/models/{name}/versions/{version}/package",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ "version": _SERIALIZER.url("version", version, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
class ModelVersionsOperations:
"""
.. warning::
@@ -257,6 +302,7 @@ def list(
properties: Optional[str] = None,
feed: Optional[str] = None,
list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ stage: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.ModelVersion"]:
"""List model versions.
@@ -293,6 +339,8 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+ :param stage: Model stage. Default value is None.
+ :type stage: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ModelVersion or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.ModelVersion]
@@ -330,6 +378,7 @@ def prepare_request(next_link=None):
properties=properties,
feed=feed,
list_view_type=list_view_type,
+ stage=stage,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
@@ -699,3 +748,267 @@ def create_or_update(
create_or_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/models/{name}/versions/{version}"
}
+
+ def _package_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.PackageRequest, IO],
+ **kwargs: Any
+ ) -> Optional[_models.PackageResponse]:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Optional[_models.PackageResponse]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "PackageRequest")
+
+ request = build_package_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._package_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("PackageResponse", pipeline_response)
+
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+
+ _package_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/models/{name}/versions/{version}/package"
+ }
+
+ @overload
+ def begin_package(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: _models.PackageRequest,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.PackageResponse]:
+ """Model Version Package operation.
+
+ Model Version Package operation.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Package operation request body. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.PackageRequest
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either PackageResponse or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.PackageResponse]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_package(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.PackageResponse]:
+ """Model Version Package operation.
+
+ Model Version Package operation.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Package operation request body. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either PackageResponse or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.PackageResponse]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_package(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.PackageRequest, IO],
+ **kwargs: Any
+ ) -> LROPoller[_models.PackageResponse]:
+ """Model Version Package operation.
+
+ Model Version Package operation.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Package operation request body. Is either a PackageRequest type or an IO type.
+ Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.PackageRequest or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either PackageResponse or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.PackageResponse]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.PackageResponse] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._package_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("PackageResponse", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_package.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/models/{name}/versions/{version}/package"
+ }
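Net new surface in _model_versions_operations.py: a stage filter on list and a long-running begin_package returning LROPoller[PackageResponse]. A hedged sketch of both calls, assuming the group is exposed as client.model_versions; the PackageRequest fields are not shown in this diff and must come from the generated model.

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient, models

client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")

# New server-side "stage" filter on the list operation.
for model_version in client.model_versions.list(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
    name="<model-name>",
    stage="Production",  # illustrative value; pass whatever stage string the service accepts
):
    print(model_version.name)

# New LRO: package a model version. Required PackageRequest properties are not shown in
# this diff, so populate the model per its generated definition (assumption).
poller = client.model_versions.begin_package(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
    name="<model-name>",
    version="1",
    body=models.PackageRequest(),
)
package_response = poller.result()  # models.PackageResponse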
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_online_deployments_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_online_deployments_operations.py
index 618d15662d98..695e86a22800 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_online_deployments_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_online_deployments_operations.py
@@ -30,7 +30,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -53,7 +53,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -72,7 +72,7 @@ def build_list_request(
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -100,7 +100,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -120,7 +120,7 @@ def build_delete_request(
"deploymentName": _SERIALIZER.url("deployment_name", deployment_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -142,7 +142,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -162,7 +162,7 @@ def build_get_request(
"deploymentName": _SERIALIZER.url("deployment_name", deployment_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -184,7 +184,7 @@ def build_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -209,7 +209,7 @@ def build_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -233,7 +233,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -258,7 +258,7 @@ def build_create_or_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -282,7 +282,7 @@ def build_get_logs_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -303,7 +303,7 @@ def build_get_logs_request(
"deploymentName": _SERIALIZER.url("deployment_name", deployment_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -330,7 +330,7 @@ def build_list_skus_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -350,7 +350,7 @@ def build_list_skus_request(
"deploymentName": _SERIALIZER.url("deployment_name", deployment_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_online_endpoints_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_online_endpoints_operations.py
index 6fbb16092160..b76f06904725 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_online_endpoints_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_online_endpoints_operations.py
@@ -30,7 +30,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -56,7 +56,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -74,7 +74,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -105,7 +105,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -124,7 +124,7 @@ def build_delete_request(
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -141,7 +141,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -160,7 +160,7 @@ def build_get_request(
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -177,7 +177,7 @@ def build_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -197,7 +197,7 @@ def build_update_request(
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -216,7 +216,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -238,7 +238,7 @@ def build_create_or_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -257,7 +257,7 @@ def build_list_keys_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -276,7 +276,7 @@ def build_list_keys_request(
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -293,7 +293,7 @@ def build_regenerate_keys_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -313,7 +313,7 @@ def build_regenerate_keys_request(
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -332,7 +332,7 @@ def build_get_token_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -351,7 +351,7 @@ def build_get_token_request(
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_operations.py
index 7dbac1fe15f1..229c12617a33 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_operations.py
@@ -40,7 +40,7 @@ def build_list_request(**kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -75,19 +75,21 @@ def __init__(self, *args, **kwargs):
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
- def list(self, **kwargs: Any) -> Iterable["_models.AmlOperation"]:
+ def list(self, **kwargs: Any) -> Iterable["_models.Operation"]:
"""Lists all of the available Azure Machine Learning Workspaces REST API operations.
+ Lists all of the available Azure Machine Learning Workspaces REST API operations.
+
:keyword callable cls: A custom type or function that will be passed the direct response
- :return: An iterator like instance of either AmlOperation or the result of cls(response)
- :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.AmlOperation]
+ :return: An iterator like instance of either Operation or the result of cls(response)
+ :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.Operation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[_models.AmlOperationListResult] = kwargs.pop("cls", None)
+ cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
@@ -128,7 +130,7 @@ def prepare_request(next_link=None):
return request
def extract_data(pipeline_response):
- deserialized = self._deserialize("AmlOperationListResult", pipeline_response)
+ deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
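Callers of operations.list() now receive models.Operation pages deserialized from OperationListResult; any type hints or isinstance checks written against the old AmlOperation names need the rename. Minimal sketch, assuming Operation keeps the usual ARM operation name field.

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")

# Each page item is now models.Operation (previously models.AmlOperation).
for op in client.operations.list():
    print(op.name)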
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_private_endpoint_connections_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_private_endpoint_connections_operations.py
index a5d73b4be943..81b37cf2196e 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_private_endpoint_connections_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_private_endpoint_connections_operations.py
@@ -28,7 +28,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -43,7 +43,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -52,16 +52,16 @@ def build_list_request(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/privateEndpointConnections",
) # pylint: disable=line-too-long
path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"workspaceName": _SERIALIZER.url(
"workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
),
- "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -72,7 +72,7 @@ def build_list_request(
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
-def build_get_request(
+def build_delete_request(
resource_group_name: str,
workspace_name: str,
private_endpoint_connection_name: str,
@@ -82,7 +82,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -103,7 +103,7 @@ def build_get_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -111,10 +111,10 @@ def build_get_request(
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
- return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+ return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
-def build_create_or_update_request(
+def build_get_request(
resource_group_name: str,
workspace_name: str,
private_endpoint_connection_name: str,
@@ -124,8 +124,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
- content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -146,20 +145,18 @@ def build_create_or_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
- if content_type is not None:
- _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
- return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
-def build_delete_request(
+def build_create_or_update_request(
resource_group_name: str,
workspace_name: str,
private_endpoint_connection_name: str,
@@ -169,7 +166,8 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -190,15 +188,17 @@ def build_delete_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
- return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+ return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
class PrivateEndpointConnectionsOperations:
@@ -224,12 +224,14 @@ def __init__(self, *args, **kwargs):
def list(
self, resource_group_name: str, workspace_name: str, **kwargs: Any
) -> Iterable["_models.PrivateEndpointConnection"]:
- """List all the private endpoint connections associated with the workspace.
+ """Called by end-users to get all PE connections.
+
+ Called by end-users to get all PE connections.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateEndpointConnection or the result of
@@ -314,19 +316,86 @@ def get_next(next_link=None):
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/privateEndpointConnections"
}
+ @distributed_trace
+ def delete( # pylint: disable=inconsistent-return-statements
+ self, resource_group_name: str, workspace_name: str, private_endpoint_connection_name: str, **kwargs: Any
+ ) -> None:
+ """Called by end-users to delete a PE connection.
+
+ Called by end-users to delete a PE connection.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param private_endpoint_connection_name: NRP Private Endpoint Connection Name. Required.
+ :type private_endpoint_connection_name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ private_endpoint_connection_name=private_endpoint_connection_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.delete.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ delete.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}"
+ }
+
@distributed_trace
def get(
self, resource_group_name: str, workspace_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> _models.PrivateEndpointConnection:
- """Gets the specified private endpoint connection associated with the workspace.
+ """Called by end-users to get a PE connection.
+
+ Called by end-users to get a PE connection.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
- :param private_endpoint_connection_name: The name of the private endpoint connection associated
- with the workspace. Required.
+ :param private_endpoint_connection_name: NRP Private Endpoint Connection Name. Required.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection or the result of cls(response)
@@ -389,23 +458,26 @@ def create_or_update(
resource_group_name: str,
workspace_name: str,
private_endpoint_connection_name: str,
- properties: _models.PrivateEndpointConnection,
+ body: _models.PrivateEndpointConnection,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.PrivateEndpointConnection:
- """Update the state of specified private endpoint connection associated with the workspace.
+ """Called by end-users to approve or reject a PE connection.
+ This method must validate and forward the call to NRP.
+
+ Called by end-users to approve or reject a PE connection.
+ This method must validate and forward the call to NRP.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
- :param private_endpoint_connection_name: The name of the private endpoint connection associated
- with the workspace. Required.
+ :param private_endpoint_connection_name: NRP Private Endpoint Connection Name. Required.
:type private_endpoint_connection_name: str
- :param properties: The private endpoint connection properties. Required.
- :type properties: ~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection
+ :param body: PrivateEndpointConnection object. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
@@ -421,23 +493,26 @@ def create_or_update(
resource_group_name: str,
workspace_name: str,
private_endpoint_connection_name: str,
- properties: IO,
+ body: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.PrivateEndpointConnection:
- """Update the state of specified private endpoint connection associated with the workspace.
+ """Called by end-users to approve or reject a PE connection.
+ This method must validate and forward the call to NRP.
+
+ Called by end-users to approve or reject a PE connection.
+ This method must validate and forward the call to NRP.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
- :param private_endpoint_connection_name: The name of the private endpoint connection associated
- with the workspace. Required.
+ :param private_endpoint_connection_name: NRP Private Endpoint Connection Name. Required.
:type private_endpoint_connection_name: str
- :param properties: The private endpoint connection properties. Required.
- :type properties: IO
+ :param body: PrivateEndpointConnection object. Required.
+ :type body: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
@@ -453,22 +528,25 @@ def create_or_update(
resource_group_name: str,
workspace_name: str,
private_endpoint_connection_name: str,
- properties: Union[_models.PrivateEndpointConnection, IO],
+ body: Union[_models.PrivateEndpointConnection, IO],
**kwargs: Any
) -> _models.PrivateEndpointConnection:
- """Update the state of specified private endpoint connection associated with the workspace.
+ """Called by end-users to approve or reject a PE connection.
+ This method must validate and forward the call to NRP.
+
+ Called by end-users to approve or reject a PE connection.
+ This method must validate and forward the call to NRP.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
- :param private_endpoint_connection_name: The name of the private endpoint connection associated
- with the workspace. Required.
+ :param private_endpoint_connection_name: NRP Private Endpoint Connection Name. Required.
:type private_endpoint_connection_name: str
- :param properties: The private endpoint connection properties. Is either a
- PrivateEndpointConnection type or a IO type. Required.
- :type properties: ~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection or IO
+ :param body: PrivateEndpointConnection object. Is either a PrivateEndpointConnection type or an
+ IO type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
@@ -495,10 +573,10 @@ def create_or_update(
content_type = content_type or "application/json"
_json = None
_content = None
- if isinstance(properties, (IOBase, bytes)):
- _content = properties
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
else:
- _json = self._serialize.body(properties, "PrivateEndpointConnection")
+ _json = self._serialize.body(body, "PrivateEndpointConnection")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
@@ -538,68 +616,3 @@ def create_or_update(
create_or_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
-
- @distributed_trace
- def delete( # pylint: disable=inconsistent-return-statements
- self, resource_group_name: str, workspace_name: str, private_endpoint_connection_name: str, **kwargs: Any
- ) -> None:
- """Deletes the specified private endpoint connection associated with the workspace.
-
- :param resource_group_name: The name of the resource group. The name is case insensitive.
- Required.
- :type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
- :type workspace_name: str
- :param private_endpoint_connection_name: The name of the private endpoint connection associated
- with the workspace. Required.
- :type private_endpoint_connection_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: None or the result of cls(response)
- :rtype: None
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- error_map = {
- 401: ClientAuthenticationError,
- 404: ResourceNotFoundError,
- 409: ResourceExistsError,
- 304: ResourceNotModifiedError,
- }
- error_map.update(kwargs.pop("error_map", {}) or {})
-
- _headers = kwargs.pop("headers", {}) or {}
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
-
- request = build_delete_request(
- resource_group_name=resource_group_name,
- workspace_name=workspace_name,
- private_endpoint_connection_name=private_endpoint_connection_name,
- subscription_id=self._config.subscription_id,
- api_version=api_version,
- template_url=self.delete.metadata["url"],
- headers=_headers,
- params=_params,
- )
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
-
- _stream = False
- pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
- )
-
- response = pipeline_response.http_response
-
- if response.status_code not in [200, 204]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
- raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
-
- if cls:
- return cls(pipeline_response, None, {})
-
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}"
- }
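
The hunk above removes the synchronous delete operation from this operations class. A purely illustrative, defensive sketch for callers that previously invoked it; the attribute check and names are assumptions, and the client is the one constructed in the earlier sketch:

ops = client.private_endpoint_connections

if hasattr(ops, "delete"):
    # Older generations of this operations class exposed delete directly.
    ops.delete(
        resource_group_name="my-rg",
        workspace_name="my-workspace",
        private_endpoint_connection_name="my-pe-connection",
    )
else:
    # After this change the operation is no longer generated here; handle accordingly.
    print("delete is not exposed on this version of the operations class")
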
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_private_link_resources_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_private_link_resources_operations.py
index a262b6f295cc..883bdfa30c94 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_private_link_resources_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_private_link_resources_operations.py
@@ -6,7 +6,8 @@
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
-from typing import Any, Callable, Dict, Optional, TypeVar
+from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
+import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
@@ -16,6 +17,7 @@
ResourceNotModifiedError,
map_error,
)
+from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
@@ -25,7 +27,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -40,7 +42,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -58,7 +60,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
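
Throughout this regeneration the vendored _format_url_section helper is replaced by plain str.format on the URL template, with the path arguments already serialized and quoted by _SERIALIZER.url. A standalone sketch of the resulting behaviour; the template and values below are illustrative only:

# Illustrative only: mimics what the request builders above now do.
_url = (
    "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}"
    "/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/privateLinkResources"
)
path_format_arguments = {
    "subscriptionId": "00000000-0000-0000-0000-000000000000",  # values assumed already quoted
    "resourceGroupName": "my-rg",
    "workspaceName": "my-workspace",
}
formatted = _url.format(**path_format_arguments)
print(formatted)  # the fully expanded resource path
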
@@ -91,19 +93,40 @@ def __init__(self, *args, **kwargs):
@distributed_trace
def list(
self, resource_group_name: str, workspace_name: str, **kwargs: Any
- ) -> _models.PrivateLinkResourceListResult:
- """Gets the private link resources that need to be created for a workspace.
+ ) -> Iterable["_models.PrivateLinkResource"]:
+ """Called by Client (Portal, CLI, etc) to get available "private link resources" for the
+ workspace.
+ Each "private link resource" is a connection endpoint (IP address) to the resource.
+ Pre single connection endpoint per workspace: the Data Plane IP address, returned by DNS
+ resolution.
+ Other RPs, such as Azure Storage, have multiple - one for Blobs, other for Queues, etc.
+ Defined in the "[NRP] Private Endpoint Design" doc, topic "GET API for GroupIds".
+
+ Called by Client (Portal, CLI, etc) to get available "private link resources" for the
+ workspace.
+ Each "private link resource" is a connection endpoint (IP address) to the resource.
+ Pre single connection endpoint per workspace: the Data Plane IP address, returned by DNS
+ resolution.
+ Other RPs, such as Azure Storage, have multiple - one for Blobs, other for Queues, etc.
+ Defined in the "[NRP] Private Endpoint Design" doc, topic "GET API for GroupIds".
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
- :return: PrivateLinkResourceListResult or the result of cls(response)
- :rtype: ~azure.mgmt.machinelearningservices.models.PrivateLinkResourceListResult
+ :return: An iterator like instance of either PrivateLinkResource or the result of cls(response)
+ :rtype:
+ ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.PrivateLinkResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.PrivateLinkResourceListResult] = kwargs.pop("cls", None)
+
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
@@ -112,42 +135,63 @@ def list(
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = kwargs.pop("headers", {}) or {}
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[_models.PrivateLinkResourceListResult] = kwargs.pop("cls", None)
-
- request = build_list_request(
- resource_group_name=resource_group_name,
- workspace_name=workspace_name,
- subscription_id=self._config.subscription_id,
- api_version=api_version,
- template_url=self.list.metadata["url"],
- headers=_headers,
- params=_params,
- )
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
-
- _stream = False
- pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
- )
-
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
- raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
-
- deserialized = self._deserialize("PrivateLinkResourceListResult", pipeline_response)
-
- if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.list.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
+
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize("PrivateLinkResourceListResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(get_next, extract_data)
list.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/privateLinkResources"
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_quotas_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_quotas_operations.py
index 1896a7522c0b..3733da6db14c 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_quotas_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_quotas_operations.py
@@ -28,7 +28,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -41,7 +41,7 @@ def build_update_request(location: str, subscription_id: str, **kwargs: Any) ->
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -55,7 +55,7 @@ def build_update_request(location: str, subscription_id: str, **kwargs: Any) ->
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -72,7 +72,7 @@ def build_list_request(location: str, subscription_id: str, **kwargs: Any) -> Ht
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -85,7 +85,7 @@ def build_list_request(location: str, subscription_id: str, **kwargs: Any) -> Ht
"location": _SERIALIZER.url("location", location, "str", pattern=r"^[-\w\._]+$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
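
As elsewhere in this diff, the quota request builders now default to api-version 2023-08-01-preview. The operations still pop an explicit api_version keyword (see the kwargs handling earlier in this diff), so a caller can pin a version explicitly; the location value and printed fields below are illustrative assumptions:

results = client.quotas.list(
    location="eastus",
    api_version="2023-08-01-preview",  # explicit pin; this is also the new default after the change
)
for quota in results:
    print(quota.id, quota.limit)
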
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registries_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registries_operations.py
index 089d9208e62c..82e194f3d2ef 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registries_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registries_operations.py
@@ -30,7 +30,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -43,7 +43,7 @@ def build_list_by_subscription_request(subscription_id: str, **kwargs: Any) -> H
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -54,7 +54,7 @@ def build_list_by_subscription_request(subscription_id: str, **kwargs: Any) -> H
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -69,7 +69,7 @@ def build_list_request(resource_group_name: str, subscription_id: str, **kwargs:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -84,7 +84,7 @@ def build_list_request(resource_group_name: str, subscription_id: str, **kwargs:
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -101,7 +101,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -119,7 +119,7 @@ def build_delete_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -134,7 +134,7 @@ def build_get_request(resource_group_name: str, registry_name: str, subscription
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -152,7 +152,7 @@ def build_get_request(resource_group_name: str, registry_name: str, subscription
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -169,7 +169,7 @@ def build_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -188,7 +188,7 @@ def build_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -207,7 +207,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -226,7 +226,7 @@ def build_create_or_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -245,7 +245,7 @@ def build_remove_regions_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -264,7 +264,7 @@ def build_remove_regions_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_code_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_code_containers_operations.py
index c925671f74d2..e6aa97f4d05f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_code_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_code_containers_operations.py
@@ -30,7 +30,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -45,7 +45,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -63,7 +63,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -82,7 +82,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -101,7 +101,7 @@ def build_delete_request(
"codeName": _SERIALIZER.url("code_name", code_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -118,7 +118,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -137,7 +137,7 @@ def build_get_request(
"codeName": _SERIALIZER.url("code_name", code_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -154,7 +154,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -174,7 +174,7 @@ def build_create_or_update_request(
"codeName": _SERIALIZER.url("code_name", code_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_code_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_code_versions_operations.py
index 431682513a37..04ef7228d53d 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_code_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_code_versions_operations.py
@@ -30,7 +30,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -53,7 +53,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -72,7 +72,7 @@ def build_list_request(
"codeName": _SERIALIZER.url("code_name", code_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -95,7 +95,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -115,7 +115,7 @@ def build_delete_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -132,7 +132,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -152,7 +152,7 @@ def build_get_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -169,7 +169,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -190,7 +190,7 @@ def build_create_or_update_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -209,7 +209,7 @@ def build_create_or_get_start_pending_upload_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -230,7 +230,7 @@ def build_create_or_get_start_pending_upload_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_component_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_component_containers_operations.py
index 8fa9042f77a3..bf2c3205a2ae 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_component_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_component_containers_operations.py
@@ -30,7 +30,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -45,7 +45,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -63,7 +63,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -82,7 +82,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -103,7 +103,7 @@ def build_delete_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -120,7 +120,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -141,7 +141,7 @@ def build_get_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -158,7 +158,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -180,7 +180,7 @@ def build_create_or_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_component_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_component_versions_operations.py
index 7bb69cf38e1a..f525b3ad42df 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_component_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_component_versions_operations.py
@@ -30,7 +30,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -48,12 +48,13 @@ def build_list_request(
order_by: Optional[str] = None,
top: Optional[int] = None,
skip: Optional[str] = None,
+ stage: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -74,7 +75,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -84,6 +85,8 @@ def build_list_request(
_params["$top"] = _SERIALIZER.query("top", top, "int")
if skip is not None:
_params["$skip"] = _SERIALIZER.query("skip", skip, "str")
+ if stage is not None:
+ _params["stage"] = _SERIALIZER.query("stage", stage, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
@@ -97,7 +100,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -119,7 +122,7 @@ def build_delete_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -136,7 +139,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -158,7 +161,7 @@ def build_get_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -175,7 +178,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -198,7 +201,7 @@ def build_create_or_update_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -239,6 +242,7 @@ def list(
order_by: Optional[str] = None,
top: Optional[int] = None,
skip: Optional[str] = None,
+ stage: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.ComponentVersion"]:
"""List versions.
@@ -259,6 +263,8 @@ def list(
:type top: int
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
+ :param stage: Component stage. Default value is None.
+ :type stage: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ComponentVersion or the result of cls(response)
:rtype:
@@ -290,6 +296,7 @@ def prepare_request(next_link=None):
order_by=order_by,
top=top,
skip=skip,
+ stage=stage,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
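
This file adds an optional stage filter to the registry component versions list operation and threads it through to the stage query parameter. A brief usage sketch; the registry and component names and the stage value are illustrative assumptions:

versions = client.registry_component_versions.list(
    resource_group_name="my-rg",
    registry_name="my-registry",
    component_name="my-component",
    stage="Production",  # new optional filter introduced by this regeneration
)
for version in versions:
    print(version.name)
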
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_data_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_data_containers_operations.py
index a5a3f85fe645..a791e8aaef71 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_data_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_data_containers_operations.py
@@ -30,7 +30,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -51,7 +51,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -69,7 +69,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -90,7 +90,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -109,7 +109,7 @@ def build_delete_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -126,7 +126,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -145,7 +145,7 @@ def build_get_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -162,7 +162,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -182,7 +182,7 @@ def build_create_or_update_request(
"name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_data_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_data_versions_operations.py
index 7fda48b37788..9561ed06ca35 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_data_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_data_versions_operations.py
@@ -30,7 +30,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -55,7 +55,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -74,7 +74,7 @@ def build_list_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -101,7 +101,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -121,7 +121,7 @@ def build_delete_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -138,7 +138,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -158,7 +158,7 @@ def build_get_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -175,7 +175,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -196,7 +196,7 @@ def build_create_or_update_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -215,7 +215,7 @@ def build_create_or_get_start_pending_upload_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -236,7 +236,7 @@ def build_create_or_get_start_pending_upload_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_environment_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_environment_containers_operations.py
index 3a3ff9edbad1..9b9c2aeb1cfc 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_environment_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_environment_containers_operations.py
@@ -30,7 +30,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -51,7 +51,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -69,7 +69,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -90,7 +90,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -111,7 +111,7 @@ def build_delete_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -128,7 +128,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -149,7 +149,7 @@ def build_get_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -166,7 +166,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -188,7 +188,7 @@ def build_create_or_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_environment_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_environment_versions_operations.py
index 727a7a9602ce..777526c27cf9 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_environment_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_environment_versions_operations.py
@@ -30,7 +30,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -49,12 +49,13 @@ def build_list_request(
top: Optional[int] = None,
skip: Optional[str] = None,
list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ stage: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -75,7 +76,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -87,6 +88,8 @@ def build_list_request(
_params["$skip"] = _SERIALIZER.query("skip", skip, "str")
if list_view_type is not None:
_params["listViewType"] = _SERIALIZER.query("list_view_type", list_view_type, "str")
+ if stage is not None:
+ _params["stage"] = _SERIALIZER.query("stage", stage, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
@@ -105,7 +108,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -127,7 +130,7 @@ def build_delete_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -149,7 +152,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -171,7 +174,7 @@ def build_get_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -193,7 +196,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -216,7 +219,7 @@ def build_create_or_update_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -258,6 +261,7 @@ def list(
top: Optional[int] = None,
skip: Optional[str] = None,
list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ stage: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.EnvironmentVersion"]:
"""List versions.
@@ -281,6 +285,9 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+ :param stage: Stage for including/excluding (for example) archived entities. Takes priority
+ over listViewType. Default value is None.
+ :type stage: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EnvironmentVersion or the result of cls(response)
:rtype:
@@ -313,6 +320,7 @@ def prepare_request(next_link=None):
top=top,
skip=skip,
list_view_type=list_view_type,
+ stage=stage,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
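For reference, a minimal usage sketch of the new stage filter on the versions list operation shown above. The operations-group attribute (registry_environment_versions), the positional argument order, and all resource names are assumptions for illustration only; stage takes priority over list_view_type per the docstring.

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="<subscription-id>",
)

# Assumed operations group and argument order; only versions tagged with the
# given stage are returned, overriding any list_view_type value.
for env_version in client.registry_environment_versions.list(
    "<resource-group>",
    "<registry-name>",
    "<environment-name>",
    stage="Production",
):
    print(env_version.name)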
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_model_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_model_containers_operations.py
index 8c44417d63e7..5a88e6196090 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_model_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_model_containers_operations.py
@@ -30,7 +30,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -51,7 +51,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -69,7 +69,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -90,7 +90,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -109,7 +109,7 @@ def build_delete_request(
"modelName": _SERIALIZER.url("model_name", model_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -126,7 +126,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -145,7 +145,7 @@ def build_get_request(
"modelName": _SERIALIZER.url("model_name", model_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -162,7 +162,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -182,7 +182,7 @@ def build_create_or_update_request(
"modelName": _SERIALIZER.url("model_name", model_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_model_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_model_versions_operations.py
index d883c93e7433..f583f64f8d4b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_model_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_model_versions_operations.py
@@ -30,7 +30,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -58,7 +58,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -77,7 +77,7 @@ def build_list_request(
"modelName": _SERIALIZER.url("model_name", model_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -110,7 +110,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -130,7 +130,7 @@ def build_delete_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -147,7 +147,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -167,7 +167,7 @@ def build_get_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -184,7 +184,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -205,7 +205,7 @@ def build_create_or_update_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -218,13 +218,53 @@ def build_create_or_update_request(
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+def build_package_request(
+ resource_group_name: str, registry_name: str, model_name: str, version: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions/{version}/package",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "registryName": _SERIALIZER.url(
+ "registry_name", registry_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{2,32}$"
+ ),
+ "modelName": _SERIALIZER.url("model_name", model_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ "version": _SERIALIZER.url("version", version, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
def build_create_or_get_start_pending_upload_request(
resource_group_name: str, registry_name: str, model_name: str, version: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -245,7 +285,7 @@ def build_create_or_get_start_pending_upload_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -892,6 +932,273 @@ def get_long_running_output(pipeline_response):
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions/{version}"
}
+ def _package_initial(
+ self,
+ resource_group_name: str,
+ registry_name: str,
+ model_name: str,
+ version: str,
+ body: Union[_models.PackageRequest, IO],
+ **kwargs: Any
+ ) -> Optional[_models.PackageResponse]:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Optional[_models.PackageResponse]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "PackageRequest")
+
+ request = build_package_request(
+ resource_group_name=resource_group_name,
+ registry_name=registry_name,
+ model_name=model_name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._package_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("PackageResponse", pipeline_response)
+
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+
+ _package_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions/{version}/package"
+ }
+
+ @overload
+ def begin_package(
+ self,
+ resource_group_name: str,
+ registry_name: str,
+ model_name: str,
+ version: str,
+ body: _models.PackageRequest,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.PackageResponse]:
+ """Model Version Package operation.
+
+ Model Version Package operation.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param registry_name: Name of Azure Machine Learning registry. This is case-insensitive.
+ Required.
+ :type registry_name: str
+ :param model_name: Container name. This is case-sensitive. Required.
+ :type model_name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Package operation request body. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.PackageRequest
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either PackageResponse or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.PackageResponse]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_package(
+ self,
+ resource_group_name: str,
+ registry_name: str,
+ model_name: str,
+ version: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.PackageResponse]:
+ """Model Version Package operation.
+
+ Model Version Package operation.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param registry_name: Name of Azure Machine Learning registry. This is case-insensitive.
+ Required.
+ :type registry_name: str
+ :param model_name: Container name. This is case-sensitive. Required.
+ :type model_name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Package operation request body. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either PackageResponse or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.PackageResponse]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_package(
+ self,
+ resource_group_name: str,
+ registry_name: str,
+ model_name: str,
+ version: str,
+ body: Union[_models.PackageRequest, IO],
+ **kwargs: Any
+ ) -> LROPoller[_models.PackageResponse]:
+ """Model Version Package operation.
+
+ Model Version Package operation.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param registry_name: Name of Azure Machine Learning registry. This is case-insensitive.
+ Required.
+ :type registry_name: str
+ :param model_name: Container name. This is case-sensitive. Required.
+ :type model_name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+        :param body: Package operation request body. Is either a PackageRequest type or an IO type.
+         Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.PackageRequest or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either PackageResponse or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.PackageResponse]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.PackageResponse] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._package_initial(
+ resource_group_name=resource_group_name,
+ registry_name=registry_name,
+ model_name=model_name,
+ version=version,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("PackageResponse", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_package.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions/{version}/package"
+ }
+
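For context, a minimal sketch of driving the new begin_package poller. The request payload is a placeholder sent as raw JSON bytes (the PackageRequest schema is not shown in this diff, so its field name here is assumed), and all resource names are illustrative.

import json

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")

# Placeholder payload; the real field names come from the 2023-08-01-preview PackageRequest model.
package_body = json.dumps({"targetEnvironmentId": "<target-environment-arm-id>"}).encode("utf-8")

poller = client.registry_model_versions.begin_package(
    resource_group_name="<resource-group>",
    registry_name="<registry-name>",
    model_name="<model-name>",
    version="1",
    body=package_body,  # bytes take the IO overload and are sent as-is
)
package_response = poller.result()  # blocks until the long-running operation finishes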
@overload
def create_or_get_start_pending_upload(
self,
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_schedules_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_schedules_operations.py
index 2e25ec11855a..a0eeed503f41 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_schedules_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_schedules_operations.py
@@ -30,7 +30,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -51,7 +51,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -69,7 +69,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -90,7 +90,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -109,7 +109,7 @@ def build_delete_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -126,7 +126,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -145,7 +145,7 @@ def build_get_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -162,7 +162,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -182,7 +182,7 @@ def build_create_or_update_request(
"name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_serverless_endpoints_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_serverless_endpoints_operations.py
new file mode 100644
index 000000000000..4a91765969b3
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_serverless_endpoints_operations.py
@@ -0,0 +1,1558 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import HttpResponse
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.core.rest import HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
+
+from .. import models as _models
+from .._serialization import Serializer
+from .._vendor import _convert_request
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_list_request(
+ resource_group_name: str, workspace_name: str, subscription_id: str, *, skip: Optional[str] = None, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+ if skip is not None:
+ _params["$skip"] = _SERIALIZER.query("skip", skip, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_update_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_create_or_update_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_list_keys_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}/listKeys",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_regenerate_keys_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}/regenerateKeys",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_status_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}/getStatus",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class ServerlessEndpointsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.MachineLearningServicesMgmtClient`'s
+ :attr:`serverless_endpoints` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs):
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self, resource_group_name: str, workspace_name: str, skip: Optional[str] = None, **kwargs: Any
+ ) -> Iterable["_models.ServerlessEndpoint"]:
+ """List Serverless Endpoints.
+
+ List Serverless Endpoints.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either ServerlessEndpoint or the result of cls(response)
+ :rtype:
+ ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.ServerlessEndpointTrackedResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ api_version=api_version,
+ template_url=self.list.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
+
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize("ServerlessEndpointTrackedResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(get_next, extract_data)
+
+ list.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints"
+ }
+
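A minimal sketch of paging through the new serverless endpoints list operation, using the serverless_endpoints attribute named in the class docstring above; resource names are placeholders.

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")

# ItemPaged follows nextLink transparently, so this iterates every page.
for endpoint in client.serverless_endpoints.list(
    resource_group_name="<resource-group>",
    workspace_name="<workspace-name>",
):
    print(endpoint.name)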
+ def _delete_initial( # pylint: disable=inconsistent-return-statements
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> None:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self._delete_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202, 204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ _delete_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}"
+ }
+
+ @distributed_trace
+ def begin_delete(self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any) -> LROPoller[None]:
+ """Delete Serverless Endpoint (asynchronous).
+
+ Delete Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._delete_initial( # type: ignore
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_delete.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}"
+ }
+
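A minimal sketch of the delete poller: begin_delete returns LROPoller[None], so result() simply blocks until the service reports completion. Names are placeholders.

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")

poller = client.serverless_endpoints.begin_delete(
    resource_group_name="<resource-group>",
    workspace_name="<workspace-name>",
    name="<endpoint-name>",
)
poller.result()  # returns None once the 202 response's Location-tracked LRO completes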
+ @distributed_trace
+ def get(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> _models.ServerlessEndpoint:
+ """Get Serverless Endpoint.
+
+ Get Serverless Endpoint.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: ServerlessEndpoint or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.ServerlessEndpoint
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.ServerlessEndpoint] = kwargs.pop("cls", None)
+
+ request = build_get_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.get.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("ServerlessEndpoint", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ get.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}"
+ }
+
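And a sketch of the synchronous get, which returns a deserialized ServerlessEndpoint model directly; placeholders as before.

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")

endpoint = client.serverless_endpoints.get(
    resource_group_name="<resource-group>",
    workspace_name="<workspace-name>",
    name="<endpoint-name>",
)
print(endpoint.id, endpoint.location)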
+ def _update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.PartialMinimalTrackedResourceWithSkuAndIdentity, IO],
+ **kwargs: Any
+ ) -> Optional[_models.ServerlessEndpoint]:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Optional[_models.ServerlessEndpoint]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "PartialMinimalTrackedResourceWithSkuAndIdentity")
+
+ request = build_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._update_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("ServerlessEndpoint", pipeline_response)
+
+ if response.status_code == 202:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+
+ _update_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}"
+ }
+
+ @overload
+ def begin_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: _models.PartialMinimalTrackedResourceWithSkuAndIdentity,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.ServerlessEndpoint]:
+ """Update Serverless Endpoint (asynchronous).
+
+ Update Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: Serverless Endpoint entity to apply during operation. Required.
+ :type body:
+ ~azure.mgmt.machinelearningservices.models.PartialMinimalTrackedResourceWithSkuAndIdentity
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either ServerlessEndpoint or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.ServerlessEndpoint]:
+ """Update Serverless Endpoint (asynchronous).
+
+ Update Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: Serverless Endpoint entity to apply during operation. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either ServerlessEndpoint or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.PartialMinimalTrackedResourceWithSkuAndIdentity, IO],
+ **kwargs: Any
+ ) -> LROPoller[_models.ServerlessEndpoint]:
+ """Update Serverless Endpoint (asynchronous).
+
+ Update Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: Serverless Endpoint entity to apply during operation. Is either a
+         PartialMinimalTrackedResourceWithSkuAndIdentity type or an IO type. Required.
+ :type body:
+ ~azure.mgmt.machinelearningservices.models.PartialMinimalTrackedResourceWithSkuAndIdentity or
+ IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either ServerlessEndpoint or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.ServerlessEndpoint] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("ServerlessEndpoint", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_update.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}"
+ }
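+
+    # Usage sketch: patching an existing serverless endpoint and blocking until
+    # the long-running operation completes. Assumes an authenticated
+    # MachineLearningServicesMgmtClient named ``ml_client`` exposing this group
+    # as ``serverless_endpoints``, ``models`` imported from
+    # azure.mgmt.machinelearningservices, and that the partial-resource model
+    # accepts a ``tags`` mapping; resource names and tag values are placeholders.
+    #
+    #     patch = models.PartialMinimalTrackedResourceWithSkuAndIdentity(
+    #         tags={"env": "dev"},
+    #     )
+    #     endpoint = ml_client.serverless_endpoints.begin_update(
+    #         resource_group_name="my-rg",
+    #         workspace_name="my-workspace",
+    #         name="my-endpoint",
+    #         body=patch,
+    #     ).result()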
+
+ def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.ServerlessEndpoint, IO],
+ **kwargs: Any
+ ) -> _models.ServerlessEndpoint:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.ServerlessEndpoint] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "ServerlessEndpoint")
+
+ request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._create_or_update_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("ServerlessEndpoint", pipeline_response)
+
+ if response.status_code == 201:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Azure-AsyncOperation"] = self._deserialize(
+ "str", response.headers.get("Azure-AsyncOperation")
+ )
+
+ deserialized = self._deserialize("ServerlessEndpoint", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ _create_or_update_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}"
+ }
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: _models.ServerlessEndpoint,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.ServerlessEndpoint]:
+ """Create or update Serverless Endpoint (asynchronous).
+
+ Create or update Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: Serverless Endpoint entity to apply during operation. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ServerlessEndpoint
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either ServerlessEndpoint or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.ServerlessEndpoint]:
+ """Create or update Serverless Endpoint (asynchronous).
+
+ Create or update Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: Serverless Endpoint entity to apply during operation. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either ServerlessEndpoint or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.ServerlessEndpoint, IO],
+ **kwargs: Any
+ ) -> LROPoller[_models.ServerlessEndpoint]:
+ """Create or update Serverless Endpoint (asynchronous).
+
+ Create or update Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: Serverless Endpoint entity to apply during operation. Is either a
+         ServerlessEndpoint type or an IO type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ServerlessEndpoint or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either ServerlessEndpoint or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.ServerlessEndpoint] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("ServerlessEndpoint", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "original-uri"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_create_or_update.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}"
+ }
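+
+    # Usage sketch: creating or updating a serverless endpoint from a raw JSON
+    # payload via the IO overload and waiting for completion. Assumes an
+    # authenticated MachineLearningServicesMgmtClient named ``ml_client``
+    # exposing this group as ``serverless_endpoints``; the file and resource
+    # names are placeholders and the file is expected to hold a ServerlessEndpoint
+    # resource definition.
+    #
+    #     with open("serverless_endpoint.json", "rb") as payload:
+    #         endpoint = ml_client.serverless_endpoints.begin_create_or_update(
+    #             resource_group_name="my-rg",
+    #             workspace_name="my-workspace",
+    #             name="my-endpoint",
+    #             body=payload,
+    #         ).result()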
+
+ @distributed_trace
+ def list_keys(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> _models.EndpointAuthKeys:
+ """List EndpointAuthKeys for an Endpoint using Key-based authentication.
+
+ List EndpointAuthKeys for an Endpoint using Key-based authentication.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: EndpointAuthKeys or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.EndpointAuthKeys
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.EndpointAuthKeys] = kwargs.pop("cls", None)
+
+ request = build_list_keys_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.list_keys.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("EndpointAuthKeys", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ list_keys.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}/listKeys"
+ }
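+
+    # Usage sketch: fetching the key-based auth keys for an endpoint. Assumes an
+    # authenticated MachineLearningServicesMgmtClient named ``ml_client``
+    # exposing this group as ``serverless_endpoints`` and that EndpointAuthKeys
+    # carries ``primary_key``/``secondary_key``; resource names are placeholders.
+    #
+    #     keys = ml_client.serverless_endpoints.list_keys(
+    #         resource_group_name="my-rg",
+    #         workspace_name="my-workspace",
+    #         name="my-endpoint",
+    #     )
+    #     primary = keys.primary_key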
+
+ def _regenerate_keys_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.RegenerateEndpointKeysRequest, IO],
+ **kwargs: Any
+ ) -> Optional[_models.EndpointAuthKeys]:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Optional[_models.EndpointAuthKeys]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "RegenerateEndpointKeysRequest")
+
+ request = build_regenerate_keys_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._regenerate_keys_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("EndpointAuthKeys", pipeline_response)
+
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+
+ _regenerate_keys_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}/regenerateKeys"
+ }
+
+ @overload
+ def begin_regenerate_keys(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: _models.RegenerateEndpointKeysRequest,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.EndpointAuthKeys]:
+ """Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous).
+
+ Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+        :param body: RegenerateKeys request. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.RegenerateEndpointKeysRequest
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either EndpointAuthKeys or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.EndpointAuthKeys]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_regenerate_keys(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.EndpointAuthKeys]:
+ """Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous).
+
+ Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+        :param body: RegenerateKeys request. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either EndpointAuthKeys or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.EndpointAuthKeys]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_regenerate_keys(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.RegenerateEndpointKeysRequest, IO],
+ **kwargs: Any
+ ) -> LROPoller[_models.EndpointAuthKeys]:
+ """Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous).
+
+ Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+        :param body: RegenerateKeys request. Is either a RegenerateEndpointKeysRequest type or an IO
+         type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.RegenerateEndpointKeysRequest or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either EndpointAuthKeys or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.EndpointAuthKeys]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.EndpointAuthKeys] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._regenerate_keys_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("EndpointAuthKeys", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ begin_regenerate_keys.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}/regenerateKeys"
+ }
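+
+    # Usage sketch: rotating the primary key of an endpoint. Assumes an
+    # authenticated MachineLearningServicesMgmtClient named ``ml_client``
+    # exposing this group as ``serverless_endpoints``, ``models`` imported from
+    # azure.mgmt.machinelearningservices, and that RegenerateEndpointKeysRequest
+    # takes a ``key_type`` of "Primary" or "Secondary"; names are placeholders.
+    #
+    #     request_body = models.RegenerateEndpointKeysRequest(key_type="Primary")
+    #     keys = ml_client.serverless_endpoints.begin_regenerate_keys(
+    #         resource_group_name="my-rg",
+    #         workspace_name="my-workspace",
+    #         name="my-endpoint",
+    #         body=request_body,
+    #     ).result()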
+
+ @distributed_trace
+ def get_status(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> _models.ServerlessEndpointStatus:
+ """Status of the model backing the Serverless Endpoint.
+
+ Status of the model backing the Serverless Endpoint.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: ServerlessEndpointStatus or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.ServerlessEndpointStatus
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.ServerlessEndpointStatus] = kwargs.pop("cls", None)
+
+ request = build_get_status_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.get_status.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("ServerlessEndpointStatus", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ get_status.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}/getStatus"
+ }
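+
+    # Usage sketch: checking the status of the model backing an endpoint.
+    # Assumes an authenticated MachineLearningServicesMgmtClient named
+    # ``ml_client`` exposing this group as ``serverless_endpoints``; resource
+    # names are placeholders.
+    #
+    #     status = ml_client.serverless_endpoints.get_status(
+    #         resource_group_name="my-rg",
+    #         workspace_name="my-workspace",
+    #         name="my-endpoint",
+    #     )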
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_usages_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_usages_operations.py
index 3dc29c049f1d..d72d8db8a24b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_usages_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_usages_operations.py
@@ -27,7 +27,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -40,7 +40,7 @@ def build_list_request(location: str, subscription_id: str, **kwargs: Any) -> Ht
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -53,7 +53,7 @@ def build_list_request(location: str, subscription_id: str, **kwargs: Any) -> Ht
"location": _SERIALIZER.url("location", location, "str", pattern=r"^[-\w\._]+$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_virtual_machine_sizes_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_virtual_machine_sizes_operations.py
index 27bb7d3b448a..2a0973e82c71 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_virtual_machine_sizes_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_virtual_machine_sizes_operations.py
@@ -25,7 +25,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -38,7 +38,7 @@ def build_list_request(location: str, subscription_id: str, **kwargs: Any) -> Ht
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -51,7 +51,7 @@ def build_list_request(location: str, subscription_id: str, **kwargs: Any) -> Ht
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_workspace_connections_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_workspace_connections_operations.py
index 28c7857fccfb..ff8ba826b01b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_workspace_connections_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_workspace_connections_operations.py
@@ -7,7 +7,7 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
+from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -21,14 +21,16 @@
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -37,14 +39,58 @@
_SERIALIZER.client_side_validation = False
-def build_create_request(
+def build_list_request(
+ resource_group_name: str,
+ workspace_name: str,
+ subscription_id: str,
+ *,
+ target: Optional[str] = None,
+ category: Optional[str] = None,
+ **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ if target is not None:
+ _params["target"] = _SERIALIZER.query("target", target, "str")
+ if category is not None:
+ _params["category"] = _SERIALIZER.query("category", category, "str")
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
resource_group_name: str, workspace_name: str, connection_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
- content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -60,29 +106,35 @@ def build_create_request(
"workspaceName": _SERIALIZER.url(
"workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
),
- "connectionName": _SERIALIZER.url("connection_name", connection_name, "str"),
+ "connectionName": _SERIALIZER.url(
+ "connection_name", connection_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
- if content_type is not None:
- _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
- return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+ return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(
- resource_group_name: str, workspace_name: str, connection_name: str, subscription_id: str, **kwargs: Any
+ resource_group_name: str,
+ workspace_name: str,
+ connection_name: str,
+ subscription_id: str,
+ *,
+ aoai_models_to_deploy: Optional[str] = None,
+ **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -98,12 +150,16 @@ def build_get_request(
"workspaceName": _SERIALIZER.url(
"workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
),
- "connectionName": _SERIALIZER.url("connection_name", connection_name, "str"),
+ "connectionName": _SERIALIZER.url(
+ "connection_name", connection_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
+ if aoai_models_to_deploy is not None:
+ _params["aoaiModelsToDeploy"] = _SERIALIZER.query("aoai_models_to_deploy", aoai_models_to_deploy, "str")
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
@@ -112,13 +168,14 @@ def build_get_request(
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
-def build_delete_request(
+def build_update_request(
resource_group_name: str, workspace_name: str, connection_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -134,39 +191,125 @@ def build_delete_request(
"workspaceName": _SERIALIZER.url(
"workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
),
- "connectionName": _SERIALIZER.url("connection_name", connection_name, "str"),
+ "connectionName": _SERIALIZER.url(
+ "connection_name", connection_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
- return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+ return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
-def build_list_request(
+def build_create_request(
+ resource_group_name: str, workspace_name: str, connection_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "connectionName": _SERIALIZER.url(
+ "connection_name", connection_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_list_secrets_request(
resource_group_name: str,
workspace_name: str,
+ connection_name: str,
subscription_id: str,
*,
- target: Optional[str] = None,
- category: Optional[str] = None,
+ aoai_models_to_deploy: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
- "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}/listsecrets",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "connectionName": _SERIALIZER.url(
+ "connection_name", connection_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+ if aoai_models_to_deploy is not None:
+ _params["aoaiModelsToDeploy"] = _SERIALIZER.query("aoai_models_to_deploy", aoai_models_to_deploy, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_test_connection_request(
+ resource_group_name: str, workspace_name: str, connection_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}/testconnection",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
@@ -176,41 +319,458 @@ def build_list_request(
"workspaceName": _SERIALIZER.url(
"workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
),
+ "connectionName": _SERIALIZER.url(
+ "connection_name", connection_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class WorkspaceConnectionsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.MachineLearningServicesMgmtClient`'s
+ :attr:`workspace_connections` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs):
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ target: Optional[str] = None,
+ category: Optional[str] = None,
+ **kwargs: Any
+ ) -> Iterable["_models.WorkspaceConnectionPropertiesV2BasicResource"]:
+        """Lists all the available machine learning workspace connections under the specified workspace.
+
+        Lists all the available machine learning workspace connections under the specified workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param target: Target of the workspace connection. Default value is None.
+ :type target: str
+ :param category: Category of the workspace connection. Default value is None.
+ :type category: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either WorkspaceConnectionPropertiesV2BasicResource or
+ the result of cls(response)
+ :rtype:
+ ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.WorkspaceConnectionPropertiesV2BasicResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ target=target,
+ category=category,
+ api_version=api_version,
+ template_url=self.list.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
+
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize(
+ "WorkspaceConnectionPropertiesV2BasicResourceArmPaginatedResult", pipeline_response
+ )
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(get_next, extract_data)
+
+ list.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections"
+ }
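+
+    # Usage sketch: iterating over a workspace's connections, optionally
+    # filtered by category. Assumes an authenticated
+    # MachineLearningServicesMgmtClient named ``ml_client`` (this group is
+    # exposed as ``workspace_connections``); the resource names and the
+    # category value are placeholders.
+    #
+    #     for connection in ml_client.workspace_connections.list(
+    #         resource_group_name="my-rg",
+    #         workspace_name="my-workspace",
+    #         category="AzureOpenAI",
+    #     ):
+    #         print(connection.name)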
+
+ @distributed_trace
+ def delete( # pylint: disable=inconsistent-return-statements
+ self, resource_group_name: str, workspace_name: str, connection_name: str, **kwargs: Any
+ ) -> None:
+        """Delete a machine learning workspace connection by name.
+
+        Delete a machine learning workspace connection by name.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param connection_name: Friendly name of the workspace connection. Required.
+ :type connection_name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ connection_name=connection_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.delete.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ delete.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}"
+ }
+
+ @distributed_trace
+ def get(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ connection_name: str,
+ aoai_models_to_deploy: Optional[str] = None,
+ **kwargs: Any
+ ) -> _models.WorkspaceConnectionPropertiesV2BasicResource:
+ """Lists machine learning workspaces connections by name.
+
+ Lists machine learning workspaces connections by name.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param connection_name: Friendly name of the workspace connection. Required.
+ :type connection_name: str
+ :param aoai_models_to_deploy: Query parameter indicating which AOAI models should be deployed.
+ Default value is None.
+ :type aoai_models_to_deploy: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: WorkspaceConnectionPropertiesV2BasicResource or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.WorkspaceConnectionPropertiesV2BasicResource] = kwargs.pop("cls", None)
+
+ request = build_get_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ connection_name=connection_name,
+ subscription_id=self._config.subscription_id,
+ aoai_models_to_deploy=aoai_models_to_deploy,
+ api_version=api_version,
+ template_url=self.get.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("WorkspaceConnectionPropertiesV2BasicResource", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ get.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}"
}
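Usage note (editorial, not part of the generated patch): a minimal sketch of `get` and `delete`, reusing the `client` constructed in the earlier sketch; all names remain placeholders.

    # Fetch a single connection by its friendly name.
    connection = client.workspace_connections.get(
        resource_group_name="<resource-group>",
        workspace_name="<workspace-name>",
        connection_name="<connection-name>",
    )
    print(connection.id)

    # delete returns None and raises HttpResponseError for anything other than 200/204.
    client.workspace_connections.delete(
        resource_group_name="<resource-group>",
        workspace_name="<workspace-name>",
        connection_name="<connection-name>",
    )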
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ @overload
+ def update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ connection_name: str,
+ body: Optional[_models.WorkspaceConnectionUpdateParameter] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.WorkspaceConnectionPropertiesV2BasicResource:
+ """Update machine learning workspaces connections under the specified workspace.
+
+ Update machine learning workspaces connections under the specified workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param connection_name: Friendly name of the workspace connection. Required.
+ :type connection_name: str
+ :param body: Parameters for workspace connection update. Default value is None.
+ :type body: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionUpdateParameter
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: WorkspaceConnectionPropertiesV2BasicResource or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ connection_name: str,
+ body: Optional[IO] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.WorkspaceConnectionPropertiesV2BasicResource:
+ """Update machine learning workspaces connections under the specified workspace.
+
+ Update machine learning workspaces connections under the specified workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param connection_name: Friendly name of the workspace connection. Required.
+ :type connection_name: str
+ :param body: Parameters for workspace connection update. Default value is None.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: WorkspaceConnectionPropertiesV2BasicResource or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ connection_name: str,
+ body: Optional[Union[_models.WorkspaceConnectionUpdateParameter, IO]] = None,
+ **kwargs: Any
+ ) -> _models.WorkspaceConnectionPropertiesV2BasicResource:
+ """Update machine learning workspaces connections under the specified workspace.
+
+ Update machine learning workspaces connections under the specified workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param connection_name: Friendly name of the workspace connection. Required.
+ :type connection_name: str
+ :param body: Parameters for workspace connection update. Is either a
+ WorkspaceConnectionUpdateParameter type or an IO type. Default value is None.
+ :type body: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionUpdateParameter or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: WorkspaceConnectionPropertiesV2BasicResource or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.WorkspaceConnectionPropertiesV2BasicResource] = kwargs.pop("cls", None)
- # Construct parameters
- if target is not None:
- _params["target"] = _SERIALIZER.query("target", target, "str")
- if category is not None:
- _params["category"] = _SERIALIZER.query("category", category, "str")
- _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ if body is not None:
+ _json = self._serialize.body(body, "WorkspaceConnectionUpdateParameter")
+ else:
+ _json = None
- # Construct headers
- _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+ request = build_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ connection_name=connection_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self.update.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
- return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
-class WorkspaceConnectionsOperations:
- """
- .. warning::
- **DO NOT** instantiate this class directly.
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- Instead, you should access the following operations through
- :class:`~azure.mgmt.machinelearningservices.MachineLearningServicesMgmtClient`'s
- :attr:`workspace_connections` attribute.
- """
+ deserialized = self._deserialize("WorkspaceConnectionPropertiesV2BasicResource", pipeline_response)
- models = _models
+ if cls:
+ return cls(pipeline_response, deserialized, {})
- def __init__(self, *args, **kwargs):
- input_args = list(args)
- self._client = input_args.pop(0) if input_args else kwargs.pop("client")
- self._config = input_args.pop(0) if input_args else kwargs.pop("config")
- self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
- self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+ return deserialized
+
+ update.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}"
+ }
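Usage note (editorial, not part of the generated patch): a minimal sketch of `update`, reusing the earlier `client`. Per the overloads above, `body` may be a `WorkspaceConnectionUpdateParameter` model, a file-like/bytes JSON payload, or `None`.

    # body is omitted here; supply a WorkspaceConnectionUpdateParameter (or JSON
    # file-like object) with the fields to change in real use.
    updated = client.workspace_connections.update(
        resource_group_name="<resource-group>",
        workspace_name="<workspace-name>",
        connection_name="<connection-name>",
        body=None,
    )
    print(updated.name)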
@overload
def create(
@@ -218,22 +778,25 @@ def create(
resource_group_name: str,
workspace_name: str,
connection_name: str,
- parameters: _models.WorkspaceConnectionPropertiesV2BasicResource,
+ body: Optional[_models.WorkspaceConnectionPropertiesV2BasicResource] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.WorkspaceConnectionPropertiesV2BasicResource:
- """create.
+ """Create or update machine learning workspaces connections under the specified workspace.
+
+ Create or update machine learning workspaces connections under the specified workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
:param connection_name: Friendly name of the workspace connection. Required.
:type connection_name: str
- :param parameters: The object for creating or updating a new workspace connection. Required.
- :type parameters:
+ :param body: The object for creating or updating a new workspace connection. Default value is
+ None.
+ :type body:
~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
@@ -250,22 +813,25 @@ def create(
resource_group_name: str,
workspace_name: str,
connection_name: str,
- parameters: IO,
+ body: Optional[IO] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.WorkspaceConnectionPropertiesV2BasicResource:
- """create.
+ """Create or update machine learning workspaces connections under the specified workspace.
+
+ Create or update machine learning workspaces connections under the specified workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
:param connection_name: Friendly name of the workspace connection. Required.
:type connection_name: str
- :param parameters: The object for creating or updating a new workspace connection. Required.
- :type parameters: IO
+ :param body: The object for creating or updating a new workspace connection. Default value is
+ None.
+ :type body: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
@@ -281,21 +847,23 @@ def create(
resource_group_name: str,
workspace_name: str,
connection_name: str,
- parameters: Union[_models.WorkspaceConnectionPropertiesV2BasicResource, IO],
+ body: Optional[Union[_models.WorkspaceConnectionPropertiesV2BasicResource, IO]] = None,
**kwargs: Any
) -> _models.WorkspaceConnectionPropertiesV2BasicResource:
- """create.
+ """Create or update machine learning workspaces connections under the specified workspace.
+
+ Create or update machine learning workspaces connections under the specified workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
:param connection_name: Friendly name of the workspace connection. Required.
:type connection_name: str
- :param parameters: The object for creating or updating a new workspace connection. Is either a
- WorkspaceConnectionPropertiesV2BasicResource type or a IO type. Required.
- :type parameters:
+ :param body: The object for creating or updating a new workspace connection. Is either a
+ WorkspaceConnectionPropertiesV2BasicResource type or an IO type. Default value is None.
+ :type body:
~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
@@ -323,10 +891,13 @@ def create(
content_type = content_type or "application/json"
_json = None
_content = None
- if isinstance(parameters, (IOBase, bytes)):
- _content = parameters
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
else:
- _json = self._serialize.body(parameters, "WorkspaceConnectionPropertiesV2BasicResource")
+ if body is not None:
+ _json = self._serialize.body(body, "WorkspaceConnectionPropertiesV2BasicResource")
+ else:
+ _json = None
request = build_create_request(
resource_group_name=resource_group_name,
@@ -368,18 +939,28 @@ def create(
}
@distributed_trace
- def get(
- self, resource_group_name: str, workspace_name: str, connection_name: str, **kwargs: Any
+ def list_secrets(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ connection_name: str,
+ aoai_models_to_deploy: Optional[str] = None,
+ **kwargs: Any
) -> _models.WorkspaceConnectionPropertiesV2BasicResource:
- """get.
+ """List all the secrets of a machine learning workspaces connections.
+
+ List all the secrets of a machine learning workspaces connections.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
:param connection_name: Friendly name of the workspace connection. Required.
:type connection_name: str
+ :param aoai_models_to_deploy: Query parameter indicating which AOAI models should be deployed.
+ Default value is None.
+ :type aoai_models_to_deploy: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WorkspaceConnectionPropertiesV2BasicResource or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
@@ -399,13 +980,14 @@ def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.WorkspaceConnectionPropertiesV2BasicResource] = kwargs.pop("cls", None)
- request = build_get_request(
+ request = build_list_secrets_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
subscription_id=self._config.subscription_id,
+ aoai_models_to_deploy=aoai_models_to_deploy,
api_version=api_version,
- template_url=self.get.metadata["url"],
+ template_url=self.list_secrets.metadata["url"],
headers=_headers,
params=_params,
)
@@ -431,28 +1013,18 @@ def get(
return deserialized
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}"
+ list_secrets.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}/listsecrets"
}
- @distributed_trace
- def delete( # pylint: disable=inconsistent-return-statements
- self, resource_group_name: str, workspace_name: str, connection_name: str, **kwargs: Any
+ def _test_connection_initial( # pylint: disable=inconsistent-return-statements
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ connection_name: str,
+ body: Optional[Union[_models.WorkspaceConnectionPropertiesV2BasicResource, IO]] = None,
+ **kwargs: Any
) -> None:
- """delete.
-
- :param resource_group_name: The name of the resource group. The name is case insensitive.
- Required.
- :type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
- :type workspace_name: str
- :param connection_name: Friendly name of the workspace connection. Required.
- :type connection_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: None or the result of cls(response)
- :rtype: None
- :raises ~azure.core.exceptions.HttpResponseError:
- """
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
@@ -461,19 +1033,34 @@ def delete( # pylint: disable=inconsistent-return-statements
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = kwargs.pop("headers", {}) or {}
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ if body is not None:
+ _json = self._serialize.body(body, "WorkspaceConnectionPropertiesV2BasicResource")
+ else:
+ _json = None
+
+ request = build_test_connection_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._test_connection_initial.metadata["url"],
headers=_headers,
params=_params,
)
@@ -487,121 +1074,185 @@ def delete( # pylint: disable=inconsistent-return-statements
response = pipeline_response.http_response
- if response.status_code not in [200, 204]:
+ if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+ response_headers = {}
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, response_headers)
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}"
+ _test_connection_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}/testconnection"
}
- @distributed_trace
- def list(
+ @overload
+ def begin_test_connection(
self,
resource_group_name: str,
workspace_name: str,
- target: Optional[str] = None,
- category: Optional[str] = None,
+ connection_name: str,
+ body: Optional[_models.WorkspaceConnectionPropertiesV2BasicResource] = None,
+ *,
+ content_type: str = "application/json",
**kwargs: Any
- ) -> Iterable["_models.WorkspaceConnectionPropertiesV2BasicResource"]:
- """list.
+ ) -> LROPoller[None]:
+ """Test machine learning workspaces connections under the specified workspace.
+
+ Test machine learning workspaces connections under the specified workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
- :param target: Target of the workspace connection. Default value is None.
- :type target: str
- :param category: Category of the workspace connection. Default value is None.
- :type category: str
+ :param connection_name: Friendly name of the workspace connection. Required.
+ :type connection_name: str
+ :param body: Workspace Connection object. Default value is None.
+ :type body:
+ ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
- :return: An iterator like instance of either WorkspaceConnectionPropertiesV2BasicResource or
- the result of cls(response)
- :rtype:
- ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource]
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
- _headers = kwargs.pop("headers", {}) or {}
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[_models.WorkspaceConnectionPropertiesV2BasicResourceArmPaginatedResult] = kwargs.pop("cls", None)
-
- error_map = {
- 401: ClientAuthenticationError,
- 404: ResourceNotFoundError,
- 409: ResourceExistsError,
- 304: ResourceNotModifiedError,
- }
- error_map.update(kwargs.pop("error_map", {}) or {})
+ @overload
+ def begin_test_connection(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ connection_name: str,
+ body: Optional[IO] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Test machine learning workspaces connections under the specified workspace.
- def prepare_request(next_link=None):
- if not next_link:
+ Test a machine learning workspace connection under the specified workspace.
- request = build_list_request(
- resource_group_name=resource_group_name,
- workspace_name=workspace_name,
- subscription_id=self._config.subscription_id,
- target=target,
- category=category,
- api_version=api_version,
- template_url=self.list.metadata["url"],
- headers=_headers,
- params=_params,
- )
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param connection_name: Friendly name of the workspace connection. Required.
+ :type connection_name: str
+ :param body: Workspace Connection object. Default value is None.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
- else:
- # make call to next link with the client's api-version
- _parsed_next_link = urllib.parse.urlparse(next_link)
- _next_request_params = case_insensitive_dict(
- {
- key: [urllib.parse.quote(v) for v in value]
- for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
- }
- )
- _next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
- "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
- )
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ @distributed_trace
+ def begin_test_connection(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ connection_name: str,
+ body: Optional[Union[_models.WorkspaceConnectionPropertiesV2BasicResource, IO]] = None,
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Test machine learning workspaces connections under the specified workspace.
- def extract_data(pipeline_response):
- deserialized = self._deserialize(
- "WorkspaceConnectionPropertiesV2BasicResourceArmPaginatedResult", pipeline_response
- )
- list_of_elem = deserialized.value
- if cls:
- list_of_elem = cls(list_of_elem) # type: ignore
- return deserialized.next_link or None, iter(list_of_elem)
+ Test a machine learning workspace connection under the specified workspace.
- def get_next(next_link=None):
- request = prepare_request(next_link)
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param connection_name: Friendly name of the workspace connection. Required.
+ :type connection_name: str
+ :param body: Workspace Connection object. Is either a
+ WorkspaceConnectionPropertiesV2BasicResource type or an IO type. Default value is None.
+ :type body:
+ ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- _stream = False
- pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._test_connection_initial( # type: ignore
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ connection_name=connection_name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
)
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
- raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+ kwargs.pop("error_map", None)
- return pipeline_response
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {})
- return ItemPaged(get_next, extract_data)
+ if polling is True:
+ polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections"
+ begin_test_connection.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}/testconnection"
}
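Usage note (editorial, not part of the generated patch): `begin_test_connection` is a long-running operation; the initial call expects HTTP 202 and polling follows the Location/Retry-After headers captured in `_test_connection_initial`. A minimal sketch, reusing the earlier `client`:

    # begin_test_connection returns LROPoller[None]; result() blocks until the
    # service-side test completes and raises HttpResponseError on failure.
    poller = client.workspace_connections.begin_test_connection(
        resource_group_name="<resource-group>",
        workspace_name="<workspace-name>",
        connection_name="<connection-name>",
        body=None,  # optionally a WorkspaceConnectionPropertiesV2BasicResource payload
    )
    poller.result()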
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_workspace_features_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_workspace_features_operations.py
index a8b05127e633..32a640d0d103 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_workspace_features_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_workspace_features_operations.py
@@ -27,7 +27,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -42,7 +42,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -60,7 +60,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_workspaces_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_workspaces_operations.py
index 3531e4cd54cc..e24f7127d272 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_workspaces_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_workspaces_operations.py
@@ -30,7 +30,7 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -39,34 +39,31 @@
_SERIALIZER.client_side_validation = False
-def build_get_request(
- resource_group_name: str, workspace_name: str, subscription_id: str, **kwargs: Any
+def build_list_by_subscription_request(
+ subscription_id: str, *, kind: Optional[str] = None, skip: Optional[str] = None, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
- "template_url",
- "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}",
+ "template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/workspaces"
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
- "resourceGroupName": _SERIALIZER.url(
- "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
- ),
- "workspaceName": _SERIALIZER.url(
- "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
- ),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+ if kind is not None:
+ _params["kind"] = _SERIALIZER.query("kind", kind, "str")
+ if skip is not None:
+ _params["$skip"] = _SERIALIZER.query("skip", skip, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
@@ -74,51 +71,54 @@ def build_get_request(
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
-def build_create_or_update_request(
- resource_group_name: str, workspace_name: str, subscription_id: str, **kwargs: Any
+def build_list_by_resource_group_request(
+ resource_group_name: str,
+ subscription_id: str,
+ *,
+ kind: Optional[str] = None,
+ skip: Optional[str] = None,
+ **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
- content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
- "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
- "workspaceName": _SERIALIZER.url(
- "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
- ),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+ if kind is not None:
+ _params["kind"] = _SERIALIZER.query("kind", kind, "str")
+ if skip is not None:
+ _params["$skip"] = _SERIALIZER.query("skip", skip, "str")
# Construct headers
- if content_type is not None:
- _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
- return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
- resource_group_name: str, workspace_name: str, subscription_id: str, **kwargs: Any
+ resource_group_name: str, workspace_name: str, subscription_id: str, *, force_to_purge: bool = False, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -136,10 +136,12 @@ def build_delete_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+ if force_to_purge is not None:
+ _params["forceToPurge"] = _SERIALIZER.query("force_to_purge", force_to_purge, "bool")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
@@ -147,14 +149,13 @@ def build_delete_request(
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
-def build_update_request(
+def build_get_request(
resource_group_name: str, workspace_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
- content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -172,67 +173,69 @@ def build_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
- if content_type is not None:
- _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
- return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
-def build_list_by_resource_group_request(
- resource_group_name: str, subscription_id: str, *, skip: Optional[str] = None, **kwargs: Any
+def build_update_request(
+ resource_group_name: str, workspace_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
- "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
- if skip is not None:
- _params["$skip"] = _SERIALIZER.query("skip", skip, "str")
# Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
- return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+ return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
-def build_diagnose_request(
+def build_create_or_update_request(
resource_group_name: str, workspace_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
- "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/diagnose",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
@@ -244,7 +247,7 @@ def build_diagnose_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -254,22 +257,23 @@ def build_diagnose_request(
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
- return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+ return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
-def build_list_keys_request(
+def build_diagnose_request(
resource_group_name: str, workspace_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
- "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listKeys",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/diagnose",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
@@ -281,30 +285,32 @@ def build_list_keys_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
-def build_resync_keys_request(
+def build_list_keys_request(
resource_group_name: str, workspace_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
- "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/resyncKeys",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listKeys",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
@@ -316,7 +322,7 @@ def build_resync_keys_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -327,49 +333,54 @@ def build_resync_keys_request(
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
-def build_list_by_subscription_request(
- subscription_id: str, *, skip: Optional[str] = None, **kwargs: Any
+def build_list_notebook_access_token_request(
+ resource_group_name: str, workspace_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
- "template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/workspaces"
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listNotebookAccessToken",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
- if skip is not None:
- _params["$skip"] = _SERIALIZER.query("skip", skip, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
- return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
-def build_list_notebook_access_token_request(
+def build_list_notebook_keys_request(
resource_group_name: str, workspace_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
- "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listNotebookAccessToken",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listNotebookKeys",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
@@ -381,7 +392,7 @@ def build_list_notebook_access_token_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -392,19 +403,19 @@ def build_list_notebook_access_token_request(
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
-def build_prepare_notebook_request(
+def build_list_storage_account_keys_request(
resource_group_name: str, workspace_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
- "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/prepareNotebook",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listStorageAccountKeys",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
@@ -416,7 +427,7 @@ def build_prepare_notebook_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -427,19 +438,19 @@ def build_prepare_notebook_request(
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
-def build_list_storage_account_keys_request(
+def build_list_outbound_network_dependencies_endpoints_request(
resource_group_name: str, workspace_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
- "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listStorageAccountKeys",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundNetworkDependenciesEndpoints",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
@@ -451,7 +462,7 @@ def build_list_storage_account_keys_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -459,22 +470,22 @@ def build_list_storage_account_keys_request(
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
- return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
-def build_list_notebook_keys_request(
+def build_prepare_notebook_request(
resource_group_name: str, workspace_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
- "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listNotebookKeys",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/prepareNotebook",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
@@ -486,7 +497,7 @@ def build_list_notebook_keys_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -497,19 +508,19 @@ def build_list_notebook_keys_request(
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
-def build_list_outbound_network_dependencies_endpoints_request(
+def build_resync_keys_request(
resource_group_name: str, workspace_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
- "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundNetworkDependenciesEndpoints",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/resyncKeys",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
@@ -521,7 +532,7 @@ def build_list_outbound_network_dependencies_endpoints_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -529,7 +540,7 @@ def build_list_outbound_network_dependencies_endpoints_request(
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
- return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
class WorkspacesOperations:
@@ -552,19 +563,28 @@ def __init__(self, *args, **kwargs):
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
- def get(self, resource_group_name: str, workspace_name: str, **kwargs: Any) -> _models.Workspace:
- """Gets the properties of the specified machine learning workspace.
+ def list_by_subscription(
+ self, kind: Optional[str] = None, skip: Optional[str] = None, **kwargs: Any
+ ) -> Iterable["_models.Workspace"]:
+ """Lists all the available machine learning workspaces under the specified subscription.
- :param resource_group_name: The name of the resource group. The name is case insensitive.
- Required.
- :type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
- :type workspace_name: str
+ Lists all the available machine learning workspaces under the specified subscription.
+
+ :param kind: Kind of workspace. Default value is None.
+ :type kind: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
:keyword callable cls: A custom type or function that will be passed the direct response
- :return: Workspace or the result of cls(response)
- :rtype: ~azure.mgmt.machinelearningservices.models.Workspace
+ :return: An iterator like instance of either Workspace or the result of cls(response)
+ :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.Workspace]
:raises ~azure.core.exceptions.HttpResponseError:
"""
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.WorkspaceListResult] = kwargs.pop("cls", None)
+
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
@@ -573,50 +593,94 @@ def get(self, resource_group_name: str, workspace_name: str, **kwargs: Any) -> _
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = kwargs.pop("headers", {}) or {}
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+ def prepare_request(next_link=None):
+ if not next_link:
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[_models.Workspace] = kwargs.pop("cls", None)
+ request = build_list_by_subscription_request(
+ subscription_id=self._config.subscription_id,
+ kind=kind,
+ skip=skip,
+ api_version=api_version,
+ template_url=self.list_by_subscription.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
- request = build_get_request(
- resource_group_name=resource_group_name,
- workspace_name=workspace_name,
- subscription_id=self._config.subscription_id,
- api_version=api_version,
- template_url=self.get.metadata["url"],
- headers=_headers,
- params=_params,
- )
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
- _stream = False
- pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
- )
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize("WorkspaceListResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
- response = pipeline_response.http_response
+ def get_next(next_link=None):
+ request = prepare_request(next_link)
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
- raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
- deserialized = self._deserialize("Workspace", pipeline_response)
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if cls:
- return cls(pipeline_response, deserialized, {})
+ return pipeline_response
- return deserialized
+ return ItemPaged(get_next, extract_data)
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
+ list_by_subscription.metadata = {
+ "url": "/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/workspaces"
}
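# Illustrative usage sketch (editor's example, hedged): how the new
# list_by_subscription operation above is typically consumed. The ItemPaged
# iterator drives the prepare_request/get_next nextLink handling shown here;
# kind and skip are optional filters. Credential and subscription values are
# placeholders, and the client construction is the standard azure-mgmt pattern.
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

ml_client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")
for workspace in ml_client.workspaces.list_by_subscription():
    # each item is a deserialized Workspace model from WorkspaceListResult.value
    print(workspace.name, workspace.location)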
- def _create_or_update_initial(
- self, resource_group_name: str, workspace_name: str, parameters: Union[_models.Workspace, IO], **kwargs: Any
- ) -> Optional[_models.Workspace]:
+ @distributed_trace
+ def list_by_resource_group(
+ self, resource_group_name: str, kind: Optional[str] = None, skip: Optional[str] = None, **kwargs: Any
+ ) -> Iterable["_models.Workspace"]:
+ """Lists all the available machine learning workspaces under the specified resource group.
+
+ Lists all the available machine learning workspaces under the specified resource group.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param kind: Kind of workspace. Default value is None.
+ :type kind: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either Workspace or the result of cls(response)
+ :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.Workspace]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.WorkspaceListResult] = kwargs.pop("cls", None)
+
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
@@ -625,30 +689,93 @@ def _create_or_update_initial(
}
error_map.update(kwargs.pop("error_map", {}) or {})
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+ def prepare_request(next_link=None):
+ if not next_link:
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[Optional[_models.Workspace]] = kwargs.pop("cls", None)
+ request = build_list_by_resource_group_request(
+ resource_group_name=resource_group_name,
+ subscription_id=self._config.subscription_id,
+ kind=kind,
+ skip=skip,
+ api_version=api_version,
+ template_url=self.list_by_resource_group.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
- content_type = content_type or "application/json"
- _json = None
- _content = None
- if isinstance(parameters, (IOBase, bytes)):
- _content = parameters
- else:
- _json = self._serialize.body(parameters, "Workspace")
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+ request.method = "GET"
+ return request
- request = build_create_or_update_request(
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize("WorkspaceListResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(get_next, extract_data)
+
+ list_by_resource_group.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces"
+ }
+
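# Illustrative usage sketch (editor's example, hedged): list_by_resource_group
# pages exactly like list_by_subscription, scoped to one resource group. The
# resource group name and credentials are placeholders.
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

ml_client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")
for workspace in ml_client.workspaces.list_by_resource_group("my-rg"):
    print(workspace.id)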
+ def _delete_initial( # pylint: disable=inconsistent-return-statements
+ self, resource_group_name: str, workspace_name: str, force_to_purge: bool = False, **kwargs: Any
+ ) -> None:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
+ force_to_purge=force_to_purge,
api_version=api_version,
- content_type=content_type,
- json=_json,
- content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
+ template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
@@ -662,113 +789,38 @@ def _create_or_update_initial(
response = pipeline_response.http_response
- if response.status_code not in [200, 202]:
+ if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = None
- if response.status_code == 200:
- deserialized = self._deserialize("Workspace", pipeline_response)
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, None, response_headers)
- return deserialized
-
- _create_or_update_initial.metadata = {
+ _delete_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
}
- @overload
- def begin_create_or_update(
- self,
- resource_group_name: str,
- workspace_name: str,
- parameters: _models.Workspace,
- *,
- content_type: str = "application/json",
- **kwargs: Any
- ) -> LROPoller[_models.Workspace]:
- """Creates or updates a workspace with the specified parameters.
-
- :param resource_group_name: The name of the resource group. The name is case insensitive.
- Required.
- :type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
- :type workspace_name: str
- :param parameters: The parameters for creating or updating a machine learning workspace.
- Required.
- :type parameters: ~azure.mgmt.machinelearningservices.models.Workspace
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
- :return: An instance of LROPoller that returns either Workspace or the result of cls(response)
- :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.Workspace]
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- def begin_create_or_update(
- self,
- resource_group_name: str,
- workspace_name: str,
- parameters: IO,
- *,
- content_type: str = "application/json",
- **kwargs: Any
- ) -> LROPoller[_models.Workspace]:
- """Creates or updates a workspace with the specified parameters.
-
- :param resource_group_name: The name of the resource group. The name is case insensitive.
- Required.
- :type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
- :type workspace_name: str
- :param parameters: The parameters for creating or updating a machine learning workspace.
- Required.
- :type parameters: IO
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
- :return: An instance of LROPoller that returns either Workspace or the result of cls(response)
- :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.Workspace]
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
@distributed_trace
- def begin_create_or_update(
- self, resource_group_name: str, workspace_name: str, parameters: Union[_models.Workspace, IO], **kwargs: Any
- ) -> LROPoller[_models.Workspace]:
- """Creates or updates a workspace with the specified parameters.
+ def begin_delete(
+ self, resource_group_name: str, workspace_name: str, force_to_purge: bool = False, **kwargs: Any
+ ) -> LROPoller[None]:
+ """Deletes a machine learning workspace.
+
+ Deletes a machine learning workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
- :param parameters: The parameters for creating or updating a machine learning workspace. Is
- either a Workspace type or a IO type. Required.
- :type parameters: ~azure.mgmt.machinelearningservices.models.Workspace or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
+ :param force_to_purge: Flag to indicate delete is a purge request. Default value is False.
+ :type force_to_purge: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
@@ -777,26 +829,24 @@ def begin_create_or_update(
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
- :return: An instance of LROPoller that returns either Workspace or the result of cls(response)
- :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.Workspace]
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.Workspace] = kwargs.pop("cls", None)
+ cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = self._create_or_update_initial(
+ raw_result = self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
workspace_name=workspace_name,
- parameters=parameters,
+ force_to_purge=force_to_purge,
api_version=api_version,
- content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
@@ -804,14 +854,14 @@ def begin_create_or_update(
)
kwargs.pop("error_map", None)
- def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("Workspace", pipeline_response)
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, deserialized, {})
- return deserialized
+ return cls(pipeline_response, None, {})
if polling is True:
- polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
@@ -825,13 +875,26 @@ def get_long_running_output(pipeline_response):
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
- begin_create_or_update.metadata = {
+ begin_delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
}
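# Illustrative usage sketch (editor's example, hedged): begin_delete returns an
# LROPoller[None] that polls via the Location header, matching the
# final-state-via: location ARMPolling option above. force_to_purge marks the
# delete as a purge request, per the docstring. Names are placeholders.
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

ml_client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")
poller = ml_client.workspaces.begin_delete("my-rg", "my-workspace", force_to_purge=False)
poller.result()  # blocks until the long-running delete completes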
- def _delete_initial( # pylint: disable=inconsistent-return-statements
- self, resource_group_name: str, workspace_name: str, **kwargs: Any
- ) -> None:
+ @distributed_trace
+ def get(self, resource_group_name: str, workspace_name: str, **kwargs: Any) -> _models.Workspace:
+ """Gets the properties of the specified machine learning workspace.
+
+ Gets the properties of the specified machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: Workspace or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.Workspace
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
@@ -844,14 +907,14 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[_models.Workspace] = kwargs.pop("cls", None)
- request = build_delete_request(
+ request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
+ template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
@@ -865,79 +928,19 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
response = pipeline_response.http_response
- if response.status_code not in [200, 202, 204]:
+ if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if cls:
- return cls(pipeline_response, None, {})
-
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
- }
-
- @distributed_trace
- def begin_delete(self, resource_group_name: str, workspace_name: str, **kwargs: Any) -> LROPoller[None]:
- """Deletes a machine learning workspace.
-
- :param resource_group_name: The name of the resource group. The name is case insensitive.
- Required.
- :type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
- :type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
- :return: An instance of LROPoller that returns either None or the result of cls(response)
- :rtype: ~azure.core.polling.LROPoller[None]
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- _headers = kwargs.pop("headers", {}) or {}
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
- polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
- lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
- cont_token: Optional[str] = kwargs.pop("continuation_token", None)
- if cont_token is None:
- raw_result = self._delete_initial( # type: ignore
- resource_group_name=resource_group_name,
- workspace_name=workspace_name,
- api_version=api_version,
- cls=lambda x, y, z: x,
- headers=_headers,
- params=_params,
- **kwargs
- )
- kwargs.pop("error_map", None)
+ deserialized = self._deserialize("Workspace", pipeline_response)
- def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
- if cls:
- return cls(pipeline_response, None, {})
+ if cls:
+ return cls(pipeline_response, deserialized, {})
- if polling is True:
- polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
- elif polling is False:
- polling_method = cast(PollingMethod, NoPolling())
- else:
- polling_method = polling
- if cont_token:
- return LROPoller.from_continuation_token(
- polling_method=polling_method,
- continuation_token=cont_token,
- client=self._client,
- deserialization_callback=get_long_running_output,
- )
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+ return deserialized
- begin_delete.metadata = {
+ get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
}
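# Illustrative usage sketch (editor's example, hedged): get is a plain
# synchronous call that deserializes the 200 response into a Workspace model.
# Names are placeholders.
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

ml_client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")
ws = ml_client.workspaces.get("my-rg", "my-workspace")
print(ws.name, ws.location)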
@@ -945,7 +948,7 @@ def _update_initial(
self,
resource_group_name: str,
workspace_name: str,
- parameters: Union[_models.WorkspaceUpdateParameters, IO],
+ body: Union[_models.WorkspaceUpdateParameters, IO],
**kwargs: Any
) -> Optional[_models.Workspace]:
error_map = {
@@ -966,10 +969,10 @@ def _update_initial(
content_type = content_type or "application/json"
_json = None
_content = None
- if isinstance(parameters, (IOBase, bytes)):
- _content = parameters
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
else:
- _json = self._serialize.body(parameters, "WorkspaceUpdateParameters")
+ _json = self._serialize.body(body, "WorkspaceUpdateParameters")
request = build_update_request(
resource_group_name=resource_group_name,
@@ -1016,20 +1019,22 @@ def begin_update(
self,
resource_group_name: str,
workspace_name: str,
- parameters: _models.WorkspaceUpdateParameters,
+ body: _models.WorkspaceUpdateParameters,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.Workspace]:
"""Updates a machine learning workspace with the specified parameters.
+ Updates a machine learning workspace with the specified parameters.
+
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
- :param parameters: The parameters for updating a machine learning workspace. Required.
- :type parameters: ~azure.mgmt.machinelearningservices.models.WorkspaceUpdateParameters
+ :param body: The parameters for updating a machine learning workspace. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.WorkspaceUpdateParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
@@ -1051,20 +1056,22 @@ def begin_update(
self,
resource_group_name: str,
workspace_name: str,
- parameters: IO,
+ body: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.Workspace]:
"""Updates a machine learning workspace with the specified parameters.
+ Updates a machine learning workspace with the specified parameters.
+
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
- :param parameters: The parameters for updating a machine learning workspace. Required.
- :type parameters: IO
+ :param body: The parameters for updating a machine learning workspace. Required.
+ :type body: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
@@ -1086,19 +1093,21 @@ def begin_update(
self,
resource_group_name: str,
workspace_name: str,
- parameters: Union[_models.WorkspaceUpdateParameters, IO],
+ body: Union[_models.WorkspaceUpdateParameters, IO],
**kwargs: Any
) -> LROPoller[_models.Workspace]:
"""Updates a machine learning workspace with the specified parameters.
+ Updates a machine learning workspace with the specified parameters.
+
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
- :param parameters: The parameters for updating a machine learning workspace. Is either a
+ :param body: The parameters for updating a machine learning workspace. Is either a
WorkspaceUpdateParameters type or an IO type. Required.
- :type parameters: ~azure.mgmt.machinelearningservices.models.WorkspaceUpdateParameters or IO
+ :type body: ~azure.mgmt.machinelearningservices.models.WorkspaceUpdateParameters or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
@@ -1127,7 +1136,7 @@ def begin_update(
raw_result = self._update_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
- parameters=parameters,
+ body=body,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
@@ -1162,103 +1171,237 @@ def get_long_running_output(pipeline_response):
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
}
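# Illustrative usage sketch (editor's example, hedged): begin_update takes the
# renamed `body` parameter as either a WorkspaceUpdateParameters model or a raw
# IO stream. The model fields used below (tags, friendly_name) are assumptions
# about the models package and are not defined in this hunk.
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient, models

ml_client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")
update = models.WorkspaceUpdateParameters(tags={"env": "dev"}, friendly_name="Dev workspace")
updated = ml_client.workspaces.begin_update("my-rg", "my-workspace", body=update).result()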
- @distributed_trace
- def list_by_resource_group(
- self, resource_group_name: str, skip: Optional[str] = None, **kwargs: Any
- ) -> Iterable["_models.Workspace"]:
- """Lists all the available machine learning workspaces under the specified resource group.
+ def _create_or_update_initial(
+ self, resource_group_name: str, workspace_name: str, body: Union[_models.Workspace, IO], **kwargs: Any
+ ) -> Optional[_models.Workspace]:
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Optional[_models.Workspace]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "Workspace")
+
+ request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ template_url=self._create_or_update_initial.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("Workspace", pipeline_response)
+
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+
+ _create_or_update_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
+ }
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ body: _models.Workspace,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.Workspace]:
+ """Creates or updates a workspace with the specified parameters.
+
+ Creates or updates a workspace with the specified parameters.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param skip: Continuation token for pagination. Default value is None.
- :type skip: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param body: The parameters for creating or updating a machine learning workspace. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.Workspace
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
- :return: An iterator like instance of either Workspace or the result of cls(response)
- :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.Workspace]
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either Workspace or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.Workspace]
:raises ~azure.core.exceptions.HttpResponseError:
"""
- _headers = kwargs.pop("headers", {}) or {}
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[_models.WorkspaceListResult] = kwargs.pop("cls", None)
- error_map = {
- 401: ClientAuthenticationError,
- 404: ResourceNotFoundError,
- 409: ResourceExistsError,
- 304: ResourceNotModifiedError,
- }
- error_map.update(kwargs.pop("error_map", {}) or {})
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ body: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.Workspace]:
+ """Creates or updates a workspace with the specified parameters.
- def prepare_request(next_link=None):
- if not next_link:
+ Creates or updates a workspace with the specified parameters.
- request = build_list_by_resource_group_request(
- resource_group_name=resource_group_name,
- subscription_id=self._config.subscription_id,
- skip=skip,
- api_version=api_version,
- template_url=self.list_by_resource_group.metadata["url"],
- headers=_headers,
- params=_params,
- )
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param body: The parameters for creating or updating a machine learning workspace. Required.
+ :type body: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either Workspace or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.Workspace]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
- else:
- # make call to next link with the client's api-version
- _parsed_next_link = urllib.parse.urlparse(next_link)
- _next_request_params = case_insensitive_dict(
- {
- key: [urllib.parse.quote(v) for v in value]
- for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
- }
- )
- _next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
- "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
- )
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ @distributed_trace
+ def begin_create_or_update(
+ self, resource_group_name: str, workspace_name: str, body: Union[_models.Workspace, IO], **kwargs: Any
+ ) -> LROPoller[_models.Workspace]:
+ """Creates or updates a workspace with the specified parameters.
- def extract_data(pipeline_response):
- deserialized = self._deserialize("WorkspaceListResult", pipeline_response)
- list_of_elem = deserialized.value
- if cls:
- list_of_elem = cls(list_of_elem) # type: ignore
- return deserialized.next_link or None, iter(list_of_elem)
+ Creates or updates a workspace with the specified parameters.
- def get_next(next_link=None):
- request = prepare_request(next_link)
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :param body: The parameters for creating or updating a machine learning workspace. Is either a
+ Workspace type or an IO type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.Workspace or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either Workspace or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.Workspace]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- _stream = False
- pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.Workspace] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
)
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
- raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+ kwargs.pop("error_map", None)
- return pipeline_response
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("Workspace", pipeline_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+ return deserialized
- return ItemPaged(get_next, extract_data)
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
- list_by_resource_group.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces"
+ begin_create_or_update.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
}
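# Illustrative usage sketch (editor's example, hedged): begin_create_or_update
# takes a Workspace model (or IO) as `body` and returns an LROPoller[Workspace].
# Only `location` is shown; a real create also needs the dependent-resource ARM
# IDs (key vault, storage account, Application Insights) on the Workspace
# model, which are assumptions about that model and not part of this hunk.
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient, models

ml_client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")
ws_body = models.Workspace(location="eastus")  # minimal; see the note above
created = ml_client.workspaces.begin_create_or_update("my-rg", "my-workspace", body=ws_body).result()
print(created.id)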
def _diagnose_initial(
self,
resource_group_name: str,
workspace_name: str,
- parameters: Optional[Union[_models.DiagnoseWorkspaceParameters, IO]] = None,
+ body: Optional[Union[_models.DiagnoseWorkspaceParameters, IO]] = None,
**kwargs: Any
) -> Optional[_models.DiagnoseResponseResult]:
error_map = {
@@ -1279,11 +1422,11 @@ def _diagnose_initial(
content_type = content_type or "application/json"
_json = None
_content = None
- if isinstance(parameters, (IOBase, bytes)):
- _content = parameters
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
else:
- if parameters is not None:
- _json = self._serialize.body(parameters, "DiagnoseWorkspaceParameters")
+ if body is not None:
+ _json = self._serialize.body(body, "DiagnoseWorkspaceParameters")
else:
_json = None
@@ -1337,7 +1480,7 @@ def begin_diagnose(
self,
resource_group_name: str,
workspace_name: str,
- parameters: Optional[_models.DiagnoseWorkspaceParameters] = None,
+ body: Optional[_models.DiagnoseWorkspaceParameters] = None,
*,
content_type: str = "application/json",
**kwargs: Any
@@ -1349,10 +1492,10 @@ def begin_diagnose(
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
- :param parameters: The parameter of diagnosing workspace health. Default value is None.
- :type parameters: ~azure.mgmt.machinelearningservices.models.DiagnoseWorkspaceParameters
+ :param body: The parameter of diagnosing workspace health. Default value is None.
+ :type body: ~azure.mgmt.machinelearningservices.models.DiagnoseWorkspaceParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
@@ -1376,7 +1519,7 @@ def begin_diagnose(
self,
resource_group_name: str,
workspace_name: str,
- parameters: Optional[IO] = None,
+ body: Optional[IO] = None,
*,
content_type: str = "application/json",
**kwargs: Any
@@ -1388,10 +1531,10 @@ def begin_diagnose(
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
- :param parameters: The parameter of diagnosing workspace health. Default value is None.
- :type parameters: IO
+ :param body: The parameter of diagnosing workspace health. Default value is None.
+ :type body: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
@@ -1415,7 +1558,7 @@ def begin_diagnose(
self,
resource_group_name: str,
workspace_name: str,
- parameters: Optional[Union[_models.DiagnoseWorkspaceParameters, IO]] = None,
+ body: Optional[Union[_models.DiagnoseWorkspaceParameters, IO]] = None,
**kwargs: Any
) -> LROPoller[_models.DiagnoseResponseResult]:
"""Diagnose workspace setup issue.
@@ -1425,11 +1568,11 @@ def begin_diagnose(
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
- :param parameters: The parameter of diagnosing workspace health. Is either a
+ :param body: The parameter of diagnosing workspace health. Is either a
DiagnoseWorkspaceParameters type or an IO type. Default value is None.
- :type parameters: ~azure.mgmt.machinelearningservices.models.DiagnoseWorkspaceParameters or IO
+ :type body: ~azure.mgmt.machinelearningservices.models.DiagnoseWorkspaceParameters or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
@@ -1460,7 +1603,7 @@ def begin_diagnose(
raw_result = self._diagnose_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
- parameters=parameters,
+ body=body,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
@@ -1504,10 +1647,13 @@ def list_keys(
"""Lists all the keys associated with this workspace. This includes keys for the storage account,
app insights and password for container registry.
+ Lists all the keys associated with this workspace. This includes keys for the storage account,
+ app insights and password for container registry.
+
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ListWorkspaceKeysResult or the result of cls(response)
@@ -1563,9 +1709,24 @@ def list_keys(
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listKeys"
}
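# Illustrative usage sketch (editor's example, hedged): list_keys is a POST that
# returns the workspace's dependent-resource secrets as a
# ListWorkspaceKeysResult. The attribute names in the comment below are
# assumptions about that model, not defined in this hunk; treat the whole
# result as sensitive.
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

ml_client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")
keys = ml_client.workspaces.list_keys("my-rg", "my-workspace")
# e.g. keys.user_storage_key, keys.app_insights_instrumentation_key (assumed fields)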
- def _resync_keys_initial( # pylint: disable=inconsistent-return-statements
+ @distributed_trace
+ def list_notebook_access_token(
self, resource_group_name: str, workspace_name: str, **kwargs: Any
- ) -> None:
+ ) -> _models.NotebookAccessTokenResult:
+ """Get Azure Machine Learning Workspace notebook access token.
+
+ Get Azure Machine Learning Workspace notebook access token.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: NotebookAccessTokenResult or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.NotebookAccessTokenResult
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
@@ -1578,14 +1739,14 @@ def _resync_keys_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[_models.NotebookAccessTokenResult] = kwargs.pop("cls", None)
- request = build_resync_keys_request(
+ request = build_list_notebook_access_token_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._resync_keys_initial.metadata["url"],
+ template_url=self.list_notebook_access_token.metadata["url"],
headers=_headers,
params=_params,
)
@@ -1599,100 +1760,107 @@ def _resync_keys_initial( # pylint: disable=inconsistent-return-statements
response = pipeline_response.http_response
- if response.status_code not in [200, 202]:
+ if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+ deserialized = self._deserialize("NotebookAccessTokenResult", pipeline_response)
+
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, deserialized, {})
- _resync_keys_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/resyncKeys"
+ return deserialized
+
+ list_notebook_access_token.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listNotebookAccessToken"
}
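# Illustrative usage sketch (editor's example, hedged): list_notebook_access_token
# is a POST returning a NotebookAccessTokenResult. The access_token attribute
# referenced in the comment is an assumption about that model (it is not
# defined in this hunk).
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

ml_client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")
token_result = ml_client.workspaces.list_notebook_access_token("my-rg", "my-workspace")
# token_result.access_token is short-lived; avoid logging it.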
@distributed_trace
- def begin_resync_keys(self, resource_group_name: str, workspace_name: str, **kwargs: Any) -> LROPoller[None]:
- """Resync all the keys associated with this workspace. This includes keys for the storage account,
- app insights and password for container registry.
+ def list_notebook_keys(
+ self, resource_group_name: str, workspace_name: str, **kwargs: Any
+ ) -> _models.ListNotebookKeysResult:
+ """Lists keys of Azure Machine Learning Workspaces notebook.
+
+ Lists keys of Azure Machine Learning Workspaces notebook.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
- :return: An instance of LROPoller that returns either None or the result of cls(response)
- :rtype: ~azure.core.polling.LROPoller[None]
+ :return: ListNotebookKeysResult or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.ListNotebookKeysResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
+ error_map = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
- polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
- lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
- cont_token: Optional[str] = kwargs.pop("continuation_token", None)
- if cont_token is None:
- raw_result = self._resync_keys_initial( # type: ignore
- resource_group_name=resource_group_name,
- workspace_name=workspace_name,
- api_version=api_version,
- cls=lambda x, y, z: x,
- headers=_headers,
- params=_params,
- **kwargs
- )
- kwargs.pop("error_map", None)
+ cls: ClsType[_models.ListNotebookKeysResult] = kwargs.pop("cls", None)
- def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
- if cls:
- return cls(pipeline_response, None, {})
+ request = build_list_notebook_keys_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.list_notebook_keys.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
- if polling is True:
- polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
- elif polling is False:
- polling_method = cast(PollingMethod, NoPolling())
- else:
- polling_method = polling
- if cont_token:
- return LROPoller.from_continuation_token(
- polling_method=polling_method,
- continuation_token=cont_token,
- client=self._client,
- deserialization_callback=get_long_running_output,
- )
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
- begin_resync_keys.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/resyncKeys"
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("ListNotebookKeysResult", pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+
+ list_notebook_keys.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listNotebookKeys"
}
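
The notebook-keys operation can be tried the same way; a short sketch follows, again assuming the `workspaces` operations group and placeholder names, with client construction repeated so the snippet stands alone (mirroring how the generated samples in this PR are written).

```python
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-1111-2222-3333-444444444444",
)
notebook_keys = client.workspaces.list_notebook_keys(
    resource_group_name="test-rg",
    workspace_name="test-ws",
)
print(notebook_keys)  # ListNotebookKeysResult
```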
@distributed_trace
- def list_by_subscription(self, skip: Optional[str] = None, **kwargs: Any) -> Iterable["_models.Workspace"]:
- """Lists all the available machine learning workspaces under the specified subscription.
+ def list_storage_account_keys(
+ self, resource_group_name: str, workspace_name: str, **kwargs: Any
+ ) -> _models.ListStorageAccountKeysResult:
+ """Lists keys of Azure Machine Learning Workspace's storage account.
- :param skip: Continuation token for pagination. Default value is None.
- :type skip: str
+ Lists keys of Azure Machine Learning Workspace's storage account.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
+ :type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
- :return: An iterator like instance of either Workspace or the result of cls(response)
- :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.Workspace]
+ :return: ListStorageAccountKeysResult or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.ListStorageAccountKeysResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
- _headers = kwargs.pop("headers", {}) or {}
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[_models.WorkspaceListResult] = kwargs.pop("cls", None)
-
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
@@ -1701,81 +1869,65 @@ def list_by_subscription(self, skip: Optional[str] = None, **kwargs: Any) -> Ite
}
error_map.update(kwargs.pop("error_map", {}) or {})
- def prepare_request(next_link=None):
- if not next_link:
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- request = build_list_by_subscription_request(
- subscription_id=self._config.subscription_id,
- skip=skip,
- api_version=api_version,
- template_url=self.list_by_subscription.metadata["url"],
- headers=_headers,
- params=_params,
- )
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.ListStorageAccountKeysResult] = kwargs.pop("cls", None)
- else:
- # make call to next link with the client's api-version
- _parsed_next_link = urllib.parse.urlparse(next_link)
- _next_request_params = case_insensitive_dict(
- {
- key: [urllib.parse.quote(v) for v in value]
- for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
- }
- )
- _next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
- "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
- )
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ request = build_list_storage_account_keys_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ template_url=self.list_storage_account_keys.metadata["url"],
+ headers=_headers,
+ params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
- def extract_data(pipeline_response):
- deserialized = self._deserialize("WorkspaceListResult", pipeline_response)
- list_of_elem = deserialized.value
- if cls:
- list_of_elem = cls(list_of_elem) # type: ignore
- return deserialized.next_link or None, iter(list_of_elem)
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
- def get_next(next_link=None):
- request = prepare_request(next_link)
+ response = pipeline_response.http_response
- _stream = False
- pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
- )
- response = pipeline_response.http_response
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
- raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+ deserialized = self._deserialize("ListStorageAccountKeysResult", pipeline_response)
- return pipeline_response
+ if cls:
+ return cls(pipeline_response, deserialized, {})
- return ItemPaged(get_next, extract_data)
+ return deserialized
- list_by_subscription.metadata = {
- "url": "/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/workspaces"
+ list_storage_account_keys.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listStorageAccountKeys"
}
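
A matching sketch for the storage-account-keys operation, under the same assumptions (`workspaces` operations group, placeholder names); the returned keys are secrets and should only be printed in a throwaway test.

```python
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-1111-2222-3333-444444444444",
)
storage_keys = client.workspaces.list_storage_account_keys(
    resource_group_name="test-rg",
    workspace_name="test-ws",
)
print(storage_keys)  # ListStorageAccountKeysResult
```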
@distributed_trace
- def list_notebook_access_token(
+ def list_outbound_network_dependencies_endpoints(
self, resource_group_name: str, workspace_name: str, **kwargs: Any
- ) -> _models.NotebookAccessTokenResult:
- """return notebook access token and refresh token.
+ ) -> _models.ExternalFQDNResponse:
+ """Called by Client (Portal, CLI, etc) to get a list of all external outbound dependencies (FQDNs)
+ programmatically.
+
+ Called by Client (Portal, CLI, etc) to get a list of all external outbound dependencies (FQDNs)
+ programmatically.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
- :return: NotebookAccessTokenResult or the result of cls(response)
- :rtype: ~azure.mgmt.machinelearningservices.models.NotebookAccessTokenResult
+ :return: ExternalFQDNResponse or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.ExternalFQDNResponse
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
@@ -1790,14 +1942,14 @@ def list_notebook_access_token(
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[_models.NotebookAccessTokenResult] = kwargs.pop("cls", None)
+ cls: ClsType[_models.ExternalFQDNResponse] = kwargs.pop("cls", None)
- request = build_list_notebook_access_token_request(
+ request = build_list_outbound_network_dependencies_endpoints_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list_notebook_access_token.metadata["url"],
+ template_url=self.list_outbound_network_dependencies_endpoints.metadata["url"],
headers=_headers,
params=_params,
)
@@ -1816,15 +1968,15 @@ def list_notebook_access_token(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("NotebookAccessTokenResult", pipeline_response)
+ deserialized = self._deserialize("ExternalFQDNResponse", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
- list_notebook_access_token.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listNotebookAccessToken"
+ list_outbound_network_dependencies_endpoints.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundNetworkDependenciesEndpoints"
}
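
The outbound-dependencies call is a plain GET and can be smoke-tested the same way; a sketch under the same assumptions:

```python
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-1111-2222-3333-444444444444",
)
fqdns = client.workspaces.list_outbound_network_dependencies_endpoints(
    resource_group_name="test-rg",
    workspace_name="test-ws",
)
print(fqdns)  # ExternalFQDNResponse listing outbound FQDN dependencies
```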
def _prepare_notebook_initial(
@@ -1869,11 +2021,16 @@ def _prepare_notebook_initial(
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
+ response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("NotebookResourceInfo", pipeline_response)
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, response_headers)
return deserialized
@@ -1885,12 +2042,14 @@ def _prepare_notebook_initial(
def begin_prepare_notebook(
self, resource_group_name: str, workspace_name: str, **kwargs: Any
) -> LROPoller[_models.NotebookResourceInfo]:
- """Prepare a notebook.
+ """Prepare Azure Machine Learning Workspace's notebook resource.
+
+ Prepare Azure Machine Learning Workspace's notebook resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
@@ -1953,87 +2112,9 @@ def get_long_running_output(pipeline_response):
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/prepareNotebook"
}
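
Because `begin_prepare_notebook` is a long-running operation (the initial call can now return 202 with `Location`/`Retry-After` headers, per the hunk above), a caller waits on the poller. A minimal sketch under the same assumptions as the earlier snippets:

```python
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-1111-2222-3333-444444444444",
)
notebook_info = client.workspaces.begin_prepare_notebook(
    resource_group_name="test-rg",
    workspace_name="test-ws",
).result()  # blocks until the LRO finishes; returns NotebookResourceInfo
print(notebook_info)
```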
- @distributed_trace
- def list_storage_account_keys(
- self, resource_group_name: str, workspace_name: str, **kwargs: Any
- ) -> _models.ListStorageAccountKeysResult:
- """List storage account keys of a workspace.
-
- :param resource_group_name: The name of the resource group. The name is case insensitive.
- Required.
- :type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
- :type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: ListStorageAccountKeysResult or the result of cls(response)
- :rtype: ~azure.mgmt.machinelearningservices.models.ListStorageAccountKeysResult
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- error_map = {
- 401: ClientAuthenticationError,
- 404: ResourceNotFoundError,
- 409: ResourceExistsError,
- 304: ResourceNotModifiedError,
- }
- error_map.update(kwargs.pop("error_map", {}) or {})
-
- _headers = kwargs.pop("headers", {}) or {}
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
-
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[_models.ListStorageAccountKeysResult] = kwargs.pop("cls", None)
-
- request = build_list_storage_account_keys_request(
- resource_group_name=resource_group_name,
- workspace_name=workspace_name,
- subscription_id=self._config.subscription_id,
- api_version=api_version,
- template_url=self.list_storage_account_keys.metadata["url"],
- headers=_headers,
- params=_params,
- )
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
-
- _stream = False
- pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
- )
-
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
- raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
-
- deserialized = self._deserialize("ListStorageAccountKeysResult", pipeline_response)
-
- if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
-
- list_storage_account_keys.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listStorageAccountKeys"
- }
-
- @distributed_trace
- def list_notebook_keys(
+ def _resync_keys_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, workspace_name: str, **kwargs: Any
- ) -> _models.ListNotebookKeysResult:
- """List keys of a notebook.
-
- :param resource_group_name: The name of the resource group. The name is case insensitive.
- Required.
- :type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
- :type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: ListNotebookKeysResult or the result of cls(response)
- :rtype: ~azure.mgmt.machinelearningservices.models.ListNotebookKeysResult
- :raises ~azure.core.exceptions.HttpResponseError:
- """
+ ) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
@@ -2046,14 +2127,14 @@ def list_notebook_keys(
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[_models.ListNotebookKeysResult] = kwargs.pop("cls", None)
+ cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_list_notebook_keys_request(
+ request = build_resync_keys_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list_notebook_keys.metadata["url"],
+ template_url=self._resync_keys_initial.metadata["url"],
headers=_headers,
params=_params,
)
@@ -2067,87 +2148,89 @@ def list_notebook_keys(
response = pipeline_response.http_response
- if response.status_code not in [200]:
+ if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ListNotebookKeysResult", pipeline_response)
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, None, response_headers)
- list_notebook_keys.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listNotebookKeys"
+ _resync_keys_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/resyncKeys"
}
@distributed_trace
- def list_outbound_network_dependencies_endpoints(
- self, resource_group_name: str, workspace_name: str, **kwargs: Any
- ) -> _models.ExternalFQDNResponse:
- """Called by Client (Portal, CLI, etc) to get a list of all external outbound dependencies (FQDNs)
- programmatically.
+ def begin_resync_keys(self, resource_group_name: str, workspace_name: str, **kwargs: Any) -> LROPoller[None]:
+ """Resync all the keys associated with this workspace.This includes keys for the storage account,
+ app insights and password for container registry.
- Called by Client (Portal, CLI, etc) to get a list of all external outbound dependencies (FQDNs)
- programmatically.
+ Resync all the keys associated with this workspace. This includes keys for the storage account,
+ app insights and password for container registry.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :param workspace_name: Azure Machine Learning Workspace Name. Required.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
- :return: ExternalFQDNResponse or the result of cls(response)
- :rtype: ~azure.mgmt.machinelearningservices.models.ExternalFQDNResponse
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
- 401: ClientAuthenticationError,
- 404: ResourceNotFoundError,
- 409: ResourceExistsError,
- 304: ResourceNotModifiedError,
- }
- error_map.update(kwargs.pop("error_map", {}) or {})
-
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[_models.ExternalFQDNResponse] = kwargs.pop("cls", None)
-
- request = build_list_outbound_network_dependencies_endpoints_request(
- resource_group_name=resource_group_name,
- workspace_name=workspace_name,
- subscription_id=self._config.subscription_id,
- api_version=api_version,
- template_url=self.list_outbound_network_dependencies_endpoints.metadata["url"],
- headers=_headers,
- params=_params,
- )
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
-
- _stream = False
- pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
- )
-
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
- raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
-
- deserialized = self._deserialize("ExternalFQDNResponse", pipeline_response)
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._resync_keys_initial( # type: ignore
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ kwargs.pop("error_map", None)
- if cls:
- return cls(pipeline_response, deserialized, {})
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {})
- return deserialized
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
- list_outbound_network_dependencies_endpoints.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundNetworkDependenciesEndpoints"
+ begin_resync_keys.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/resyncKeys"
}
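
`begin_resync_keys` is likewise an LRO, and this regeneration switches it to 202-based polling with `final-state-via: location` (see the polling hunk above). A sketch of how a caller would wait on it, same assumptions and placeholders as the earlier snippets:

```python
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-1111-2222-3333-444444444444",
)
client.workspaces.begin_resync_keys(
    resource_group_name="test-rg",
    workspace_name="test-ws",
).result()  # returns None; raises HttpResponseError if the resync fails
```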
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/capacity_reservation_group/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/capacity_reservation_group/create_or_update.py
new file mode 100644
index 000000000000..daf90e0e48e8
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/capacity_reservation_group/create_or_update.py
@@ -0,0 +1,49 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python create_or_update.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.capacity_reservation_groups.create_or_update(
+ resource_group_name="test-rg",
+ group_id="string",
+ body={
+ "identity": {"type": "SystemAssigned", "userAssignedIdentities": {"string": {}}},
+ "kind": "string",
+ "location": "string",
+ "properties": {"offer": {"offerName": "string", "publisher": "string"}, "reservedCapacity": 1},
+ "sku": {"capacity": 1, "family": "string", "name": "string", "size": "string", "tier": "Premium"},
+ "tags": {},
+ },
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/CapacityReservationGroup/createOrUpdate.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/capacity_reservation_group/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/capacity_reservation_group/delete.py
new file mode 100644
index 000000000000..030bc8539d5b
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/capacity_reservation_group/delete.py
@@ -0,0 +1,40 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python delete.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ client.capacity_reservation_groups.delete(
+ resource_group_name="test-rg",
+ group_id="string",
+ )
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/CapacityReservationGroup/delete.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/capacity_reservation_group/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/capacity_reservation_group/get.py
new file mode 100644
index 000000000000..1b251ae801ed
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/capacity_reservation_group/get.py
@@ -0,0 +1,41 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python get.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.capacity_reservation_groups.get(
+ resource_group_name="test-rg",
+ group_id="string",
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/CapacityReservationGroup/get.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/capacity_reservation_group/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/capacity_reservation_group/list.py
new file mode 100644
index 000000000000..989999c71235
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/capacity_reservation_group/list.py
@@ -0,0 +1,41 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python list.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.capacity_reservation_groups.list(
+ resource_group_name="test-rg",
+ )
+ for item in response:
+ print(item)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/CapacityReservationGroup/list.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/capacity_reservation_group/list_by_subscription.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/capacity_reservation_group/list_by_subscription.py
new file mode 100644
index 000000000000..ecd8509ed484
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/capacity_reservation_group/list_by_subscription.py
@@ -0,0 +1,39 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python list_by_subscription.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.capacity_reservation_groups.capacity_reservation_groups_list_by_subscription()
+ for item in response:
+ print(item)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/CapacityReservationGroup/listBySubscription.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/capacity_reservation_group/update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/capacity_reservation_group/update.py
new file mode 100644
index 000000000000..72c16bdd6ed4
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/capacity_reservation_group/update.py
@@ -0,0 +1,45 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python update.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ client.capacity_reservation_groups.update(
+ resource_group_name="test-rg",
+ group_id="string",
+ body={
+ "identity": {"type": "SystemAssigned", "userAssignedIdentities": {"string": {}}},
+ "sku": {"capacity": 1, "family": "string", "name": "string", "size": "string", "tier": "Standard"},
+ "tags": {},
+ },
+ )
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/CapacityReservationGroup/update.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/aks_compute.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/aks_compute.py
index 6dca84bdc7dd..75f966dc7672 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/aks_compute.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/aks_compute.py
@@ -46,6 +46,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/createOrUpdate/AKSCompute.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Compute/createOrUpdate/AKSCompute.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/aml_compute.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/aml_compute.py
index efbef7e090e7..e3aa038b10ef 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/aml_compute.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/aml_compute.py
@@ -47,6 +47,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/createOrUpdate/AmlCompute.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Compute/createOrUpdate/AmlCompute.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/basic_aks_compute.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/basic_aks_compute.py
index c4f5100e5166..61efdd17e383 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/basic_aks_compute.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/basic_aks_compute.py
@@ -38,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/createOrUpdate/BasicAKSCompute.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Compute/createOrUpdate/BasicAKSCompute.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/basic_aml_compute.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/basic_aml_compute.py
index 4e8789e3ef24..6184a6e6781b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/basic_aml_compute.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/basic_aml_compute.py
@@ -55,6 +55,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/createOrUpdate/BasicAmlCompute.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Compute/createOrUpdate/BasicAmlCompute.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/basic_data_factory_compute.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/basic_data_factory_compute.py
index 322dec99f0ea..df362b7e1b00 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/basic_data_factory_compute.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/basic_data_factory_compute.py
@@ -38,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/createOrUpdate/BasicDataFactoryCompute.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Compute/createOrUpdate/BasicDataFactoryCompute.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/compute_instance.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/compute_instance.py
index d2e8342bc1ad..63612cd00ebd 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/compute_instance.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/compute_instance.py
@@ -39,30 +39,45 @@ def main():
"computeType": "ComputeInstance",
"properties": {
"applicationSharingPolicy": "Personal",
+ "autologgerSettings": {"mlflowAutologger": "Enabled"},
"computeInstanceAuthorizationType": "personal",
"customServices": [
{
"docker": {"privileged": True},
- "endpoints": [{"name": "connect", "protocol": "http", "published": 8787, "target": 8787}],
- "environmentVariables": {"test_variable": {"type": "local", "value": "test_value"}},
- "image": {"reference": "ghcr.io/azure/rocker-rstudio-ml-verse:latest", "type": "docker"},
- "name": "rstudio",
+ "endpoints": [
+ {
+ "hostIp": None,
+ "name": "connect",
+ "protocol": "http",
+ "published": 4444,
+ "target": 8787,
+ }
+ ],
+ "environmentVariables": {
+ "RSP_LICENSE": {"type": "local", "value": "XXXX-XXXX-XXXX-XXXX-XXXX-XXXX-XXXX"}
+ },
+ "image": {"reference": "ghcr.io/azure/rstudio-workbench:latest", "type": "docker"},
+ "name": "rstudio-workbench",
"volumes": [
{
- "readOnly": False,
- "source": "/home/azureuser/cloudfiles",
- "target": "/home/azureuser/cloudfiles",
+ "readOnly": True,
+ "source": "/mnt/azureuser/",
+ "target": "/home/testuser/",
"type": "bind",
}
],
}
],
+ "enableOSPatching": True,
+ "enableRootAccess": True,
+ "enableSSO": True,
"personalComputeInstanceSettings": {
"assignedUser": {
"objectId": "00000000-0000-0000-0000-000000000000",
"tenantId": "00000000-0000-0000-0000-000000000000",
}
},
+ "releaseQuotaOnStop": True,
"sshSettings": {"sshPublicAccess": "Disabled"},
"subnet": {"id": "test-subnet-resource-id"},
"vmSize": "STANDARD_NC6",
@@ -73,6 +88,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/createOrUpdate/ComputeInstance.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Compute/createOrUpdate/ComputeInstance.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/compute_instance_minimal.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/compute_instance_minimal.py
index 575439d32563..8e1ce625749a 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/compute_instance_minimal.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/compute_instance_minimal.py
@@ -41,6 +41,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/createOrUpdate/ComputeInstanceMinimal.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Compute/createOrUpdate/ComputeInstanceMinimal.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/compute_instance_with_schedules.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/compute_instance_with_schedules.py
index 4134df53117f..f3ba277f5714 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/compute_instance_with_schedules.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/compute_instance_with_schedules.py
@@ -69,6 +69,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/createOrUpdate/ComputeInstanceWithSchedules.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Compute/createOrUpdate/ComputeInstanceWithSchedules.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/kubernetes_compute.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/kubernetes_compute.py
index 07fc5057133b..5c91ec216c2a 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/kubernetes_compute.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/kubernetes_compute.py
@@ -58,6 +58,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/createOrUpdate/KubernetesCompute.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Compute/createOrUpdate/KubernetesCompute.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/delete.py
index 03e8f509fbae..f10741d556ea 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/delete.py
@@ -37,6 +37,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Compute/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/aks_compute.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/aks_compute.py
index 4692f63ff9d7..36382b240d7d 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/aks_compute.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/aks_compute.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/get/AKSCompute.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Compute/get/AKSCompute.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/aml_compute.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/aml_compute.py
index 59ea7d05e296..5a0fea75859b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/aml_compute.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/aml_compute.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/get/AmlCompute.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Compute/get/AmlCompute.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/compute_instance.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/compute_instance.py
index fe66677f7193..f6b8d03562b0 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/compute_instance.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/compute_instance.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/get/ComputeInstance.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Compute/get/ComputeInstance.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/kubernetes_compute.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/kubernetes_compute.py
index b09cac4f3f62..1d794562e79a 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/kubernetes_compute.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/kubernetes_compute.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/get/KubernetesCompute.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Compute/get/KubernetesCompute.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get_allowed_vm_sizes_for_resize.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get_allowed_vm_sizes_for_resize.py
new file mode 100644
index 000000000000..50a071a75216
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get_allowed_vm_sizes_for_resize.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python get_allowed_vm_sizes_for_resize.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="34adfa4f-cedf-4dc0-ba29-b6d1a69ab345",
+ )
+
+ response = client.compute.get_allowed_resize_sizes(
+ resource_group_name="testrg123",
+ workspace_name="workspaces123",
+ compute_name="compute123",
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Compute/getAllowedVMSizesForResize.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/list.py
index 884ceeb73b59..b57d868861f1 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/list.py
@@ -37,6 +37,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Compute/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/list_keys.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/list_keys.py
index ed185cc80326..8c1d8f41bd20 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/list_keys.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/list_keys.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/listKeys.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Compute/listKeys.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/list_nodes.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/list_nodes.py
index 66ee6a0f4bee..cafe7e6f4e2f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/list_nodes.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/list_nodes.py
@@ -38,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/listNodes.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Compute/listNodes.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/patch.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/patch.py
index 344934f7929b..2e7b7196348a 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/patch.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/patch.py
@@ -44,6 +44,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/patch.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Compute/patch.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/resize.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/resize.py
new file mode 100644
index 000000000000..ddee93cb2c84
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/resize.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python resize.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more info about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="34adfa4f-cedf-4dc0-ba29-b6d1a69ab345",
+ )
+
+ client.compute.begin_resize(
+ resource_group_name="testrg123",
+ workspace_name="workspaces123",
+ compute_name="compute123",
+ parameters={"targetVMSize": "Standard_DS11_v2"},
+ ).result()
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Compute/resize.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/restart.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/restart.py
index 6f314eb40ed9..9065d9279953 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/restart.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/restart.py
@@ -36,6 +36,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/restart.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Compute/restart.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/start.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/start.py
index 9e6d787b739b..bc38beb19301 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/start.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/start.py
@@ -36,6 +36,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/start.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Compute/start.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/stop.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/stop.py
index 431311991b67..112e0878cf1c 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/stop.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/stop.py
@@ -36,6 +36,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/stop.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Compute/stop.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/update_custom_services.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/update_custom_services.py
new file mode 100644
index 000000000000..bb6fc86f4dca
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/update_custom_services.py
@@ -0,0 +1,57 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python update_custom_services.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more info about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="34adfa4f-cedf-4dc0-ba29-b6d1a69ab345",
+ )
+
+ client.compute.update_custom_services(
+ resource_group_name="testrg123",
+ workspace_name="workspaces123",
+ compute_name="compute123",
+ custom_services=[
+ {
+ "docker": {"privileged": True},
+ "endpoints": [
+ {"hostIp": None, "name": "connect", "protocol": "http", "published": 4444, "target": 8787}
+ ],
+ "environmentVariables": {
+ "RSP_LICENSE": {"type": "local", "value": "XXXX-XXXX-XXXX-XXXX-XXXX-XXXX-XXXX"}
+ },
+ "image": {"reference": "ghcr.io/azure/rstudio-workbench:latest", "type": "docker"},
+ "name": "rstudio-workbench",
+ "volumes": [
+ {"readOnly": True, "source": "/mnt/azureuser/", "target": "/home/testuser/", "type": "bind"}
+ ],
+ }
+ ],
+ )
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Compute/updateCustomServices.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/update_idle_shutdown_setting.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/update_idle_shutdown_setting.py
new file mode 100644
index 000000000000..7221a16f3baf
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/update_idle_shutdown_setting.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python update_idle_shutdown_setting.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more info about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="34adfa4f-cedf-4dc0-ba29-b6d1a69ab345",
+ )
+
+ client.compute.update_idle_shutdown_setting(
+ resource_group_name="testrg123",
+ workspace_name="workspaces123",
+ compute_name="compute123",
+ parameters={"idleTimeBeforeShutdown": "PT120M"},
+ )
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Compute/updateIdleShutdownSetting.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_blob_waccount_key/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_blob_waccount_key/create_or_update.py
index f51f887bbd51..ef207039c48f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_blob_waccount_key/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_blob_waccount_key/create_or_update.py
@@ -53,6 +53,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Datastore/AzureBlobWAccountKey/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Datastore/AzureBlobWAccountKey/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_data_lake_gen1_wservice_principal/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_data_lake_gen1_wservice_principal/create_or_update.py
index bfbb9e2ecda5..34adce0979d3 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_data_lake_gen1_wservice_principal/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_data_lake_gen1_wservice_principal/create_or_update.py
@@ -54,6 +54,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Datastore/AzureDataLakeGen1WServicePrincipal/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Datastore/AzureDataLakeGen1WServicePrincipal/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_data_lake_gen2_wservice_principal/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_data_lake_gen2_wservice_principal/create_or_update.py
index d221f4673621..32642c927aab 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_data_lake_gen2_wservice_principal/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_data_lake_gen2_wservice_principal/create_or_update.py
@@ -57,6 +57,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Datastore/AzureDataLakeGen2WServicePrincipal/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Datastore/AzureDataLakeGen2WServicePrincipal/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_file_waccount_key/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_file_waccount_key/create_or_update.py
index 0ae6c4a4789c..a0689522505e 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_file_waccount_key/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_file_waccount_key/create_or_update.py
@@ -53,6 +53,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Datastore/AzureFileWAccountKey/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Datastore/AzureFileWAccountKey/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/delete.py
index 78dd652ab719..4eb4b295752b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/delete.py
@@ -36,6 +36,6 @@ def main():
)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Datastore/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Datastore/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/get.py
index 93e17e5a0997..de494d818eff 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/get.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Datastore/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Datastore/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/list.py
index 4b5de229b6e2..bc5c6698b6d4 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/list.py
@@ -37,6 +37,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Datastore/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Datastore/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/list_secrets.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/list_secrets.py
index 74fe51d50b35..fed5f4034f3c 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/list_secrets.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/list_secrets.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Datastore/listSecrets.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Datastore/listSecrets.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/external_fqdn/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/external_fqdn/get.py
index aa82632feffb..f9223214f459 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/external_fqdn/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/external_fqdn/get.py
@@ -36,6 +36,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/ExternalFQDN/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/ExternalFQDN/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/feature/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/feature/get.py
new file mode 100644
index 000000000000..57aea6f7f25a
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/feature/get.py
@@ -0,0 +1,44 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python get.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more info about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.features.get(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ featureset_name="string",
+ featureset_version="string",
+ feature_name="string",
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Feature/get.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/feature/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/feature/list.py
new file mode 100644
index 000000000000..8780d9705751
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/feature/list.py
@@ -0,0 +1,44 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python list.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more info about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.features.list(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ featureset_name="string",
+ featureset_version="string",
+ )
+ for item in response:
+ print(item)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Feature/list.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/auto_ml_job/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/auto_ml_job/create_or_update.py
index 47f02ffa0535..dd66c7ceb285 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/auto_ml_job/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/auto_ml_job/create_or_update.py
@@ -35,52 +35,59 @@ def main():
id="string",
body={
"properties": {
+ "autologgerSettings": {"mlflowAutologger": "Disabled"},
+ "codeId": "string",
+ "command": "string",
+ "componentId": "string",
"computeId": "string",
"description": "string",
"displayName": "string",
+ "distribution": {"distributionType": "TensorFlow", "parameterServerCount": 1, "workerCount": 1},
"environmentId": "string",
"environmentVariables": {"string": "string"},
"experimentName": "string",
"identity": {"identityType": "AMLToken"},
+ "inputs": {"string": {"description": "string", "jobInputType": "literal", "value": "string"}},
"isArchived": False,
- "jobType": "AutoML",
+ "jobType": "Command",
+ "limits": {"jobLimitsType": "Command", "timeout": "PT5M"},
+ "notificationSetting": {"emailOn": ["JobCompleted"], "emails": ["string"]},
"outputs": {
"string": {
+ "assetName": "string",
+ "assetVersion": "string",
"description": "string",
"jobOutputType": "uri_file",
- "mode": "ReadWriteMount",
+ "mode": "Direct",
"uri": "string",
}
},
"properties": {"string": "string"},
+ "queueSettings": {"jobTier": "Premium", "priority": 1},
"resources": {
+ "dockerArgs": "string",
"instanceCount": 1,
"instanceType": "string",
- "properties": {"string": {"9bec0ab0-c62f-4fa9-a97c-7b24bbcc90ad": None}},
+ "locations": ["string"],
+ "properties": {"string": {"f69c8d5a-9b39-4183-92d3-a2b18944cf95": None}},
+ "shmSize": "2g",
},
"services": {
"string": {
"endpoint": "string",
"jobServiceType": "string",
+ "nodes": {"nodesValueType": "All"},
"port": 1,
"properties": {"string": "string"},
}
},
"tags": {"string": "string"},
- "taskDetails": {
- "limitSettings": {"maxTrials": 2},
- "modelSettings": {"validationCropSize": 2},
- "searchSpace": [{"validationCropSize": "choice(2, 360)"}],
- "targetColumnName": "string",
- "taskType": "ImageClassification",
- "trainingData": {"jobInputType": "mltable", "uri": "string"},
- },
}
},
)
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/AutoMLJob/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Job/AutoMLJob/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/auto_ml_job/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/auto_ml_job/get.py
index 5744ad654a74..e3fb19c03c3b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/auto_ml_job/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/auto_ml_job/get.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/AutoMLJob/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Job/AutoMLJob/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/auto_ml_job/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/auto_ml_job/list.py
index 2b84eaeaa4ea..d4b7d8ef81c4 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/auto_ml_job/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/auto_ml_job/list.py
@@ -37,6 +37,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/AutoMLJob/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Job/AutoMLJob/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/cancel.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/cancel.py
index 92009c446172..d46d0171f206 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/cancel.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/cancel.py
@@ -36,6 +36,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/cancel.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Job/cancel.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/command_job/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/command_job/create_or_update.py
index d37efcce8ad5..dfff08d650b3 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/command_job/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/command_job/create_or_update.py
@@ -35,8 +35,10 @@ def main():
id="string",
body={
"properties": {
+ "autologgerSettings": {"mlflowAutologger": "Disabled"},
"codeId": "string",
"command": "string",
+ "componentId": "string",
"computeId": "string",
"description": "string",
"displayName": "string",
@@ -46,26 +48,35 @@ def main():
"experimentName": "string",
"identity": {"identityType": "AMLToken"},
"inputs": {"string": {"description": "string", "jobInputType": "literal", "value": "string"}},
+ "isArchived": False,
"jobType": "Command",
"limits": {"jobLimitsType": "Command", "timeout": "PT5M"},
+ "notificationSetting": {"emailOn": ["JobCancelled"], "emails": ["string"]},
"outputs": {
"string": {
+ "assetName": "string",
+ "assetVersion": "string",
"description": "string",
"jobOutputType": "uri_file",
- "mode": "ReadWriteMount",
+ "mode": "Upload",
"uri": "string",
}
},
"properties": {"string": "string"},
+ "queueSettings": {"jobTier": "Basic", "priority": 1},
"resources": {
+ "dockerArgs": "string",
"instanceCount": 1,
"instanceType": "string",
- "properties": {"string": {"e6b6493e-7d5e-4db3-be1e-306ec641327e": None}},
+ "locations": ["string"],
+ "properties": {"string": {"c9ac10d0-915b-4de5-afe8-a4c78a37a558": None}},
+ "shmSize": "2g",
},
"services": {
"string": {
"endpoint": "string",
"jobServiceType": "string",
+ "nodes": {"nodesValueType": "All"},
"port": 1,
"properties": {"string": "string"},
}
@@ -77,6 +88,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/CommandJob/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Job/CommandJob/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/command_job/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/command_job/get.py
index da20b17f687c..63730c3b9536 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/command_job/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/command_job/get.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/CommandJob/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Job/CommandJob/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/command_job/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/command_job/list.py
index 603f7bbbc412..b0f48cd01842 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/command_job/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/command_job/list.py
@@ -37,6 +37,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/CommandJob/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Job/CommandJob/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/delete.py
index 80e82b2866eb..e349499f10d9 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/delete.py
@@ -32,10 +32,10 @@ def main():
client.jobs.begin_delete(
resource_group_name="test-rg",
workspace_name="my-aml-workspace",
- id="http://subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/jobs/my-favorite-aml-job",
+ id="string",
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Job/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/pipeline_job/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/pipeline_job/create_or_update.py
index 8d093b05a24b..b403f1828e35 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/pipeline_job/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/pipeline_job/create_or_update.py
@@ -61,6 +61,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/PipelineJob/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Job/PipelineJob/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/pipeline_job/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/pipeline_job/get.py
index 69dd018de1ad..a3406708611b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/pipeline_job/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/pipeline_job/get.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/PipelineJob/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Job/PipelineJob/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/pipeline_job/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/pipeline_job/list.py
index 2ee9adaf0c1e..d7ea24b6b9fa 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/pipeline_job/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/pipeline_job/list.py
@@ -37,6 +37,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/PipelineJob/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Job/PipelineJob/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/sweep_job/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/sweep_job/create_or_update.py
index 39198f670f47..bb60ed013152 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/sweep_job/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/sweep_job/create_or_update.py
@@ -35,49 +35,59 @@ def main():
id="string",
body={
"properties": {
+ "autologgerSettings": {"mlflowAutologger": "Disabled"},
+ "codeId": "string",
+ "command": "string",
+ "componentId": "string",
"computeId": "string",
"description": "string",
"displayName": "string",
- "earlyTermination": {"delayEvaluation": 1, "evaluationInterval": 1, "policyType": "MedianStopping"},
+ "distribution": {"distributionType": "TensorFlow", "parameterServerCount": 1, "workerCount": 1},
+ "environmentId": "string",
+ "environmentVariables": {"string": "string"},
"experimentName": "string",
- "jobType": "Sweep",
- "limits": {
- "jobLimitsType": "Sweep",
- "maxConcurrentTrials": 1,
- "maxTotalTrials": 1,
- "trialTimeout": "PT1S",
+ "identity": {"identityType": "AMLToken"},
+ "inputs": {"string": {"description": "string", "jobInputType": "literal", "value": "string"}},
+ "isArchived": False,
+ "jobType": "Command",
+ "limits": {"jobLimitsType": "Command", "timeout": "PT5M"},
+ "notificationSetting": {"emailOn": ["JobCompleted"], "emails": ["string"]},
+ "outputs": {
+ "string": {
+ "assetName": "string",
+ "assetVersion": "string",
+ "description": "string",
+ "jobOutputType": "uri_file",
+ "mode": "ReadWriteMount",
+ "uri": "string",
+ }
},
- "objective": {"goal": "Minimize", "primaryMetric": "string"},
"properties": {"string": "string"},
- "samplingAlgorithm": {"samplingAlgorithmType": "Grid"},
- "searchSpace": {"string": {}},
+ "queueSettings": {"jobTier": "Basic", "priority": 1},
+ "resources": {
+ "dockerArgs": "string",
+ "instanceCount": 1,
+ "instanceType": "string",
+ "locations": ["string"],
+ "properties": {"string": {"5fc1f627-491e-45a0-a6a2-f5b4be884911": None}},
+ "shmSize": "2g",
+ },
"services": {
"string": {
"endpoint": "string",
"jobServiceType": "string",
+ "nodes": {"nodesValueType": "All"},
"port": 1,
"properties": {"string": "string"},
}
},
"tags": {"string": "string"},
- "trial": {
- "codeId": "string",
- "command": "string",
- "distribution": {"distributionType": "Mpi", "processCountPerInstance": 1},
- "environmentId": "string",
- "environmentVariables": {"string": "string"},
- "resources": {
- "instanceCount": 1,
- "instanceType": "string",
- "properties": {"string": {"e6b6493e-7d5e-4db3-be1e-306ec641327e": None}},
- },
- },
}
},
)
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/SweepJob/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Job/SweepJob/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/sweep_job/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/sweep_job/get.py
index 4c424fedc4e7..e83761df9206 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/sweep_job/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/sweep_job/get.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/SweepJob/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Job/SweepJob/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/sweep_job/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/sweep_job/list.py
index 3081f0d1b58d..0c6d6b697c8a 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/sweep_job/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/sweep_job/list.py
@@ -37,6 +37,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/SweepJob/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Job/SweepJob/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/update.py
new file mode 100644
index 000000000000..02198ba70d24
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/update.py
@@ -0,0 +1,47 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python update.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more info about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.jobs.update(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ id="string",
+ body={
+ "properties": {
+ "notificationSetting": {"webhooks": {"string": {"eventType": "string", "webhookType": "AzureDevOps"}}}
+ }
+ },
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Job/update.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/labeling_job/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/labeling_job/create_or_update.py
new file mode 100644
index 000000000000..52b91f68834e
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/labeling_job/create_or_update.py
@@ -0,0 +1,75 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python create_or_update.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more info about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.labeling_jobs.begin_create_or_update(
+ resource_group_name="workspace-1234",
+ workspace_name="testworkspace",
+ id="testLabelingJob",
+ body={
+ "properties": {
+ "description": "string",
+ "jobInstructions": {"uri": "link/to/instructions"},
+ "jobType": "Labeling",
+ "labelCategories": {
+ "myCategory1": {
+ "classes": {
+ "myLabelClass1": {"displayName": "myLabelClass1", "subclasses": {}},
+ "myLabelClass2": {"displayName": "myLabelClass2", "subclasses": {}},
+ },
+ "displayName": "myCategory1Title",
+ "multiSelect": "Disabled",
+ },
+ "myCategory2": {
+ "classes": {
+ "myLabelClass1": {"displayName": "myLabelClass1", "subclasses": {}},
+ "myLabelClass2": {"displayName": "myLabelClass2", "subclasses": {}},
+ },
+ "displayName": "myCategory2Title",
+ "multiSelect": "Disabled",
+ },
+ },
+ "labelingJobMediaProperties": {"mediaType": "Image"},
+ "mlAssistConfiguration": {
+ "inferencingComputeBinding": "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/resourceGroup-1234/providers/Microsoft.MachineLearningServices/workspaces/testworkspace/computes/myscoringcompute",
+ "mlAssist": "Enabled",
+ "trainingComputeBinding": "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/resourceGroup-1234/providers/Microsoft.MachineLearningServices/workspaces/testworkspace/computes/mytrainingompute",
+ },
+ "properties": {"additionalProp1": "string", "additionalProp2": "string", "additionalProp3": "string"},
+ "tags": {"additionalProp1": "string", "additionalProp2": "string", "additionalProp3": "string"},
+ }
+ },
+ ).result()
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/LabelingJob/createOrUpdate.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/labeling_job/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/labeling_job/delete.py
new file mode 100644
index 000000000000..9036617d0308
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/labeling_job/delete.py
@@ -0,0 +1,41 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python delete.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more info about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ client.labeling_jobs.delete(
+ resource_group_name="workspace-1234",
+ workspace_name="testworkspace",
+ id="testLabelingJob",
+ )
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/LabelingJob/delete.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/labeling_job/export_labels.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/labeling_job/export_labels.py
new file mode 100644
index 000000000000..c85820dff0f3
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/labeling_job/export_labels.py
@@ -0,0 +1,43 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python export_labels.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more info about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.labeling_jobs.begin_export_labels(
+ resource_group_name="workspace-1234",
+ workspace_name="testworkspace",
+ id="testLabelingJob",
+ body={"format": "Dataset"},
+ ).result()
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/LabelingJob/exportLabels.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/labeling_job/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/labeling_job/get.py
new file mode 100644
index 000000000000..9e3e0f775bdd
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/labeling_job/get.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python get.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more info about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.labeling_jobs.get(
+ resource_group_name="workspace-1234",
+ workspace_name="testworkspace",
+ id="testLabelingJob",
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/LabelingJob/get.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/labeling_job/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/labeling_job/list.py
new file mode 100644
index 000000000000..b3e878b7486f
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/labeling_job/list.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python list.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more info about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.labeling_jobs.list(
+ resource_group_name="workspace-1234",
+ workspace_name="testworkspace",
+ )
+ for item in response:
+ print(item)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/LabelingJob/list.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/labeling_job/pause.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/labeling_job/pause.py
new file mode 100644
index 000000000000..33bf2c02dea0
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/labeling_job/pause.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python pause.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more info about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.labeling_jobs.pause(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ id="string",
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/LabelingJob/pause.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/labeling_job/resume.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/labeling_job/resume.py
new file mode 100644
index 000000000000..d479d8f46ec7
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/labeling_job/resume.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python resume.py
+
+    Before running the sample, please set the values of the client ID, tenant ID, and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more information about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.labeling_jobs.begin_resume(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ id="string",
+ ).result()
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/LabelingJob/resume.json
+if __name__ == "__main__":
+ main()
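The three labeling-job samples above use the synchronous client. As an illustrative aside (not part of the generated output), a minimal sketch of the same resume call with the async surface is shown below, assuming the package's aio namespace mirrors the sync operations; all placeholder values are reused from resume.py.

import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def resume_labeling_job():
    # Placeholders below reuse the values from the generated resume.py sample.
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(
            credential=credential,
            subscription_id="00000000-1111-2222-3333-444444444444",
        ) as client:
            # begin_resume returns an AsyncLROPoller; await result() to block
            # until the long-running resume operation finishes.
            poller = await client.labeling_jobs.begin_resume(
                resource_group_name="test-rg",
                workspace_name="my-aml-workspace",
                id="string",
            )
            print(await poller.result())


if __name__ == "__main__":
    asyncio.run(resume_labeling_job())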
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/create_or_update_rule.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/create_or_update_rule.py
new file mode 100644
index 000000000000..fda2c49accd0
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/create_or_update_rule.py
@@ -0,0 +1,45 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python create_or_update_rule.py
+
+    Before running the sample, please set the values of the client ID, tenant ID, and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more information about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.managed_network_settings_rule.begin_create_or_update(
+ resource_group_name="test-rg",
+ workspace_name="aml-workspace-name",
+ rule_name="some_string",
+ body={
+ "properties": {"category": "UserDefined", "destination": "some_string", "status": "Active", "type": "FQDN"}
+ },
+ ).result()
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/ManagedNetwork/createOrUpdateRule.json
+if __name__ == "__main__":
+ main()
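The rule body in create_or_update_rule.py is passed as a plain dict. A hedged alternative, assuming this package version exposes the OutboundRuleBasicResource and FqdnOutboundRule models (names not confirmed against this exact build), is to build the same payload from typed models:

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
from azure.mgmt.machinelearningservices.models import FqdnOutboundRule, OutboundRuleBasicResource


def create_fqdn_rule_with_models():
    client = MachineLearningServicesMgmtClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-1111-2222-3333-444444444444",
    )
    # Equivalent of the {"properties": {...}} dict in the generated sample;
    # the FQDN rule kind is implied by the model class instead of "type": "FQDN".
    rule = OutboundRuleBasicResource(
        properties=FqdnOutboundRule(destination="some_string", category="UserDefined", status="Active")
    )
    poller = client.managed_network_settings_rule.begin_create_or_update(
        resource_group_name="test-rg",
        workspace_name="aml-workspace-name",
        rule_name="some_string",
        body=rule,
    )
    print(poller.result())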
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/delete_rule.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/delete_rule.py
new file mode 100644
index 000000000000..9b7b14632dcd
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/delete_rule.py
@@ -0,0 +1,41 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python delete_rule.py
+
+    Before running the sample, please set the values of the client ID, tenant ID, and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more information about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ client.managed_network_settings_rule.begin_delete(
+ resource_group_name="test-rg",
+ workspace_name="aml-workspace-name",
+ rule_name="some_string",
+ ).result()
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/ManagedNetwork/deleteRule.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/get_rule.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/get_rule.py
new file mode 100644
index 000000000000..162a0a928a67
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/get_rule.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python get_rule.py
+
+    Before running the sample, please set the values of the client ID, tenant ID, and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more information about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.managed_network_settings_rule.get(
+ resource_group_name="test-rg",
+ workspace_name="aml-workspace-name",
+ rule_name="some_string",
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/ManagedNetwork/getRule.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/list_rule.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/list_rule.py
new file mode 100644
index 000000000000..b20475678b91
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/list_rule.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python list_rule.py
+
+    Before running the sample, please set the values of the client ID, tenant ID, and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more information about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.managed_network_settings_rule.list(
+ resource_group_name="test-rg",
+ workspace_name="aml-workspace-name",
+ )
+ for item in response:
+ print(item)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/ManagedNetwork/listRule.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/provision.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/provision.py
new file mode 100644
index 000000000000..a9e54349ae9d
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/provision.py
@@ -0,0 +1,41 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python provision.py
+
+    Before running the sample, please set the values of the client ID, tenant ID, and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more information about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.managed_network_provisions.begin_provision_managed_network(
+ resource_group_name="test-rg",
+ workspace_name="aml-workspace-name",
+ ).result()
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/ManagedNetwork/provision.json
+if __name__ == "__main__":
+ main()
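provision.py blocks on .result(). Because begin_provision_managed_network returns a standard azure-core LROPoller, the blocking call can be replaced with explicit polling; a brief sketch, reusing the placeholder values from the sample above:

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-1111-2222-3333-444444444444",
)
poller = client.managed_network_provisions.begin_provision_managed_network(
    resource_group_name="test-rg",
    workspace_name="aml-workspace-name",
)
print(poller.status())  # e.g. "InProgress" while provisioning is still running
poller.wait()           # block until the long-running operation completes
print(poller.result())  # provisioning status payload returned by the service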
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/notebook/list_keys.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/notebook/list_keys.py
index 66a02eb6b469..d0ee6a53441c 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/notebook/list_keys.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/notebook/list_keys.py
@@ -36,6 +36,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Notebook/listKeys.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Notebook/listKeys.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/notebook/prepare.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/notebook/prepare.py
index 009f750c227c..833b357fe046 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/notebook/prepare.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/notebook/prepare.py
@@ -36,6 +36,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Notebook/prepare.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Notebook/prepare.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/get_logs.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/get_logs.py
index 2bae655720a4..127c67d7eaaa 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/get_logs.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/get_logs.py
@@ -39,6 +39,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/OnlineDeployment/getLogs.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/OnlineDeployment/getLogs.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/create_or_update.py
index da3956267da7..92f3f3c0c3ab 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/create_or_update.py
@@ -74,6 +74,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/OnlineDeployment/KubernetesOnlineDeployment/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/OnlineDeployment/KubernetesOnlineDeployment/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/get.py
index bccba8918be1..3189b955c371 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/get.py
@@ -38,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/OnlineDeployment/KubernetesOnlineDeployment/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/OnlineDeployment/KubernetesOnlineDeployment/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/list_skus.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/list_skus.py
index 532af7db70b6..f35a4c95131a 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/list_skus.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/list_skus.py
@@ -39,6 +39,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/OnlineDeployment/KubernetesOnlineDeployment/listSkus.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/OnlineDeployment/KubernetesOnlineDeployment/listSkus.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/update.py
index ef4bd08cf870..36995e815171 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/update.py
@@ -42,6 +42,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/OnlineDeployment/KubernetesOnlineDeployment/update.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/OnlineDeployment/KubernetesOnlineDeployment/update.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/list.py
index 7df4af070406..5c87e93ef08f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/list.py
@@ -38,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/OnlineDeployment/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/OnlineDeployment/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/create_or_update.py
index c890a3f55278..a8ba240886dc 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/create_or_update.py
@@ -77,6 +77,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/OnlineDeployment/ManagedOnlineDeployment/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/OnlineDeployment/ManagedOnlineDeployment/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/get.py
index ccfa935e7f46..e3f2ed6e8074 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/get.py
@@ -38,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/OnlineDeployment/ManagedOnlineDeployment/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/OnlineDeployment/ManagedOnlineDeployment/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/list_skus.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/list_skus.py
index ef2b493c784f..5c6bf3171c6a 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/list_skus.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/list_skus.py
@@ -39,6 +39,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/OnlineDeployment/ManagedOnlineDeployment/listSkus.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/OnlineDeployment/ManagedOnlineDeployment/listSkus.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/update.py
index e7ec5681f716..1f84a981b6bd 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/update.py
@@ -42,6 +42,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/OnlineDeployment/ManagedOnlineDeployment/update.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/OnlineDeployment/ManagedOnlineDeployment/update.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/create_or_update.py
index 4f95d6a5bdef..f8ab73bce63b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/create_or_update.py
@@ -33,13 +33,13 @@ def main():
resource_group_name="rg-1234",
workspace_name="testworkspace",
private_endpoint_connection_name="{privateEndpointConnectionName}",
- properties={
+ body={
"properties": {"privateLinkServiceConnectionState": {"description": "Auto-Approved", "status": "Approved"}}
},
)
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/PrivateEndpointConnection/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/PrivateEndpointConnection/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/delete.py
index d071ba7e4df5..327268c384ea 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/delete.py
@@ -36,6 +36,6 @@ def main():
)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/PrivateEndpointConnection/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/PrivateEndpointConnection/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/get.py
index f3a4e42b1c2b..4edf7fdbfbc5 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/get.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/PrivateEndpointConnection/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/PrivateEndpointConnection/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/list.py
index 8a893d04e41b..911ea50d0b37 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/list.py
@@ -37,6 +37,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/PrivateEndpointConnection/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/PrivateEndpointConnection/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_link_resource/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_link_resource/list.py
index 71a768ec1c85..15501acf334d 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_link_resource/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_link_resource/list.py
@@ -33,9 +33,10 @@ def main():
resource_group_name="rg-1234",
workspace_name="testworkspace",
)
- print(response)
+ for item in response:
+ print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/PrivateLinkResource/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/PrivateLinkResource/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/quota/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/quota/list.py
index ba614e993d14..c18d62ce76ad 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/quota/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/quota/list.py
@@ -36,6 +36,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Quota/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Quota/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/quota/update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/quota/update.py
index d135e3ae3a58..001919ad1e6f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/quota/update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/quota/update.py
@@ -51,6 +51,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Quota/update.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Quota/update.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/create_or_update_system_created.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/create_or_update_system_created.py
index 577787fee2b8..379968eeceef 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/create_or_update_system_created.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/create_or_update_system_created.py
@@ -91,6 +91,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registries/createOrUpdate-SystemCreated.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registries/createOrUpdate-SystemCreated.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/create_or_update_user_created.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/create_or_update_user_created.py
index 4fb0f626522f..81599dbf9009 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/create_or_update_user_created.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/create_or_update_user_created.py
@@ -75,6 +75,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registries/createOrUpdate-UserCreated.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registries/createOrUpdate-UserCreated.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/delete.py
index 2e4a8a929e44..bcb2da702015 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/delete.py
@@ -35,6 +35,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registries/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registries/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/get_system_created.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/get_system_created.py
index fd5b114248b7..9c18c5b460a0 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/get_system_created.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/get_system_created.py
@@ -36,6 +36,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registries/get-SystemCreated.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registries/get-SystemCreated.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/get_user_created.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/get_user_created.py
index bd86983df7e3..51f68a66bf19 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/get_user_created.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/get_user_created.py
@@ -36,6 +36,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registries/get-UserCreated.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registries/get-UserCreated.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/list_by_subscription.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/list_by_subscription.py
index d017c80aa223..b0bd1d479342 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/list_by_subscription.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/list_by_subscription.py
@@ -34,6 +34,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registries/listBySubscription.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registries/listBySubscription.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/list_system_created.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/list_system_created.py
index d9ef5482f599..e8ea6e9325fb 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/list_system_created.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/list_system_created.py
@@ -36,6 +36,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registries/list-SystemCreated.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registries/list-SystemCreated.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/list_user_created.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/list_user_created.py
index 193089df5170..26ff0acd9093 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/list_user_created.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/list_user_created.py
@@ -36,6 +36,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registries/list-UserCreated.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registries/list-UserCreated.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/remove_regions.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/remove_regions.py
index 75dd8c55aac0..c2ec47e8fc6b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/remove_regions.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/remove_regions.py
@@ -93,6 +93,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registries/removeRegions.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registries/removeRegions.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/update_system_created.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/update_system_created.py
index bda97b4ad6d5..120f1d4271b1 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/update_system_created.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/update_system_created.py
@@ -41,6 +41,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registries/update-SystemCreated.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registries/update-SystemCreated.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/update_user_created.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/update_user_created.py
index d8ecdb855c20..7ad556364a8c 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/update_user_created.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/update_user_created.py
@@ -41,6 +41,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registries/update-UserCreated.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registries/update-UserCreated.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/create_or_update.py
index 3b5010d9d181..45bd44e8ae5f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/create_or_update.py
@@ -38,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/CodeContainer/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/CodeContainer/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/delete.py
index b72a96432c9f..34876b3d9009 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/delete.py
@@ -36,6 +36,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/CodeContainer/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/CodeContainer/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/get.py
index c8b1ce0c2a7c..8ef30ffc7138 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/get.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/CodeContainer/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/CodeContainer/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/list.py
index 8f5972265e01..1595d6f8c19c 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/list.py
@@ -37,6 +37,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/CodeContainer/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/CodeContainer/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/create_or_get_start_pending_upload.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/create_or_get_start_pending_upload.py
index e3e974c61eb9..ad7c17f2c381 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/create_or_get_start_pending_upload.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/create_or_get_start_pending_upload.py
@@ -34,11 +34,11 @@ def main():
registry_name="registryName",
code_name="string",
version="string",
- body={"pendingUploadId": "string", "pendingUploadType": "TemporaryBlobReference"},
+ body={"pendingUploadId": "string", "pendingUploadType": "None"},
)
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/CodeVersion/createOrGetStartPendingUpload.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/CodeVersion/createOrGetStartPendingUpload.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/create_or_update.py
index 1df9f062fc28..de90934fc6a7 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/create_or_update.py
@@ -47,6 +47,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/CodeVersion/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/CodeVersion/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/delete.py
index 3e012305c57c..c2cc06b029f9 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/delete.py
@@ -37,6 +37,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/CodeVersion/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/CodeVersion/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/get.py
index f62658b63374..773726d65cce 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/get.py
@@ -38,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/CodeVersion/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/CodeVersion/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/list.py
index 4453204ec59d..8f5deefacdff 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/list.py
@@ -38,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/CodeVersion/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/CodeVersion/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/create_or_update.py
index 6c3efd5e6a13..eb87766fee4b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/create_or_update.py
@@ -40,6 +40,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ComponentContainer/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/ComponentContainer/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/delete.py
index aea7dc0c0dea..ec1aae212da9 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/delete.py
@@ -36,6 +36,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ComponentContainer/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/ComponentContainer/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/get.py
index 652246d77411..a1faaa03fac4 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/get.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ComponentContainer/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/ComponentContainer/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/list.py
index af03b40d6482..9c689c57e875 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/list.py
@@ -37,6 +37,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ComponentContainer/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/ComponentContainer/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/create_or_update.py
index 3ef241cc2c8d..61bc5c372c3b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/create_or_update.py
@@ -47,6 +47,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ComponentVersion/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/ComponentVersion/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/delete.py
index 24e8ba968dcd..b630c79c1a30 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/delete.py
@@ -37,6 +37,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ComponentVersion/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/ComponentVersion/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/get.py
index 906686be462b..1a6c3f72ab07 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/get.py
@@ -38,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ComponentVersion/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/ComponentVersion/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/list.py
index 39d93fd0de0c..65dc5d33aa3f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/list.py
@@ -38,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ComponentVersion/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/ComponentVersion/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/create_or_update.py
index 2619016ab138..da276afaa0d1 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/create_or_update.py
@@ -46,6 +46,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/DataContainer/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/DataContainer/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/delete.py
index db9823083c38..c565e15f48b4 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/delete.py
@@ -36,6 +36,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/DataContainer/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/DataContainer/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/get.py
index cd457746f179..dc066ed0bae6 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/get.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/DataContainer/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/DataContainer/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/registry_list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/registry_list.py
index 1b5917eb34e7..5843417f4c98 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/registry_list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/registry_list.py
@@ -37,6 +37,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/DataContainer/registryList.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/DataContainer/registryList.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/create_or_get_start_pending_upload.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/create_or_get_start_pending_upload.py
index 9142baba5fad..6216f1249631 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/create_or_get_start_pending_upload.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/create_or_get_start_pending_upload.py
@@ -39,6 +39,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/DataVersionBase/createOrGetStartPendingUpload.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/DataVersionBase/createOrGetStartPendingUpload.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/create_or_update.py
index 01da03f03e19..2ebc5bdeff0b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/create_or_update.py
@@ -50,6 +50,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/DataVersionBase/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/DataVersionBase/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/delete.py
index d8772f5b25d5..b5f05162ad9a 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/delete.py
@@ -37,6 +37,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/DataVersionBase/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/DataVersionBase/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/get.py
index fdf5e3b6453f..8aa9f4310937 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/get.py
@@ -38,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/DataVersionBase/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/DataVersionBase/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/registry_list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/registry_list.py
index 54d19354ab1b..c67fb2a4ca0f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/registry_list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/registry_list.py
@@ -38,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/DataVersionBase/registryList.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/DataVersionBase/registryList.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/create_or_update.py
index 931d285c42d8..9855d5ca21bd 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/create_or_update.py
@@ -40,6 +40,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ModelContainer/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/ModelContainer/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/delete.py
index 098883e9fffe..5b2ab8631453 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/delete.py
@@ -36,6 +36,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ModelContainer/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/ModelContainer/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/get.py
index bc742dec3e1b..03176f4afce2 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/get.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ModelContainer/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/ModelContainer/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/list.py
index 75d05a51b957..e5aa5d0ee2da 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/list.py
@@ -37,6 +37,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ModelContainer/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/ModelContainer/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/create_or_get_start_pending_upload.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/create_or_get_start_pending_upload.py
index a24b50119143..2a90ba88a083 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/create_or_get_start_pending_upload.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/create_or_get_start_pending_upload.py
@@ -34,11 +34,11 @@ def main():
registry_name="registryName",
model_name="string",
version="string",
- body={"pendingUploadId": "string", "pendingUploadType": "TemporaryBlobReference"},
+ body={"pendingUploadId": "string", "pendingUploadType": "None"},
)
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ModelVersion/createOrGetStartPendingUpload.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/ModelVersion/createOrGetStartPendingUpload.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/create_or_update.py
index a2caeebafdd8..60ae48f12a56 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/create_or_update.py
@@ -49,6 +49,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ModelVersion/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/ModelVersion/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/delete.py
index 243da0a3dc5e..23f25a9d64c1 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/delete.py
@@ -37,6 +37,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ModelVersion/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/ModelVersion/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/get.py
index 3f718017c063..2946b7d0a50c 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/get.py
@@ -38,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ModelVersion/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/ModelVersion/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/list.py
index ca64f031391d..91c31bff1a77 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/list.py
@@ -38,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ModelVersion/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/ModelVersion/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/package.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/package.py
new file mode 100644
index 000000000000..70160e268ef6
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/package.py
@@ -0,0 +1,62 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python package.py
+
+ Before running the sample, set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more information about how to get these values, see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.registry_model_versions.begin_package(
+ resource_group_name="test-rg",
+ registry_name="my-aml-registry",
+ model_name="string",
+ version="string",
+ body={
+ "baseEnvironmentSource": {"baseEnvironmentSourceType": "EnvironmentAsset", "resourceId": "string"},
+ "environmentVariables": {"string": "string"},
+ "inferencingServer": {
+ "codeConfiguration": {"codeId": "string", "scoringScript": "string"},
+ "serverType": "AzureMLBatch",
+ },
+ "inputs": [
+ {
+ "inputType": "UriFile",
+ "mode": "Download",
+ "mountPath": "string",
+ "path": {"inputPathType": "Url", "url": "string"},
+ }
+ ],
+ "modelConfiguration": {"mode": "ReadOnlyMount", "mountPath": "string"},
+ "tags": {"string": "string"},
+ "targetEnvironmentId": "string",
+ },
+ ).result()
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Registry/ModelVersion/package.json
+if __name__ == "__main__":
+ main()
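
The docstring of the new package.py sample above lists the service-principal environment variables that its DefaultAzureCredential depends on (via EnvironmentCredential). A minimal, illustrative sketch of satisfying that prerequisite in-process and then running the generated sample; the file path and the placeholder values are assumptions, not part of the generated code:

import os
import runpy

# Placeholder values only; a real Azure AD service principal's IDs and secret would go here.
os.environ.setdefault("AZURE_CLIENT_ID", "<client-id>")
os.environ.setdefault("AZURE_TENANT_ID", "<tenant-id>")
os.environ.setdefault("AZURE_CLIENT_SECRET", "<client-secret>")

# DefaultAzureCredential inside the sample picks these variables up through
# EnvironmentCredential, so the sample can then be executed as-is
# (assuming package.py is in the current working directory).
runpy.run_path("package.py", run_name="__main__")
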
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/create_or_update.py
index 351a1bcdea58..3bbb41834ae4 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/create_or_update.py
@@ -57,6 +57,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Schedule/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Schedule/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/delete.py
index 69da5de82fe1..14d9d1efd2c0 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/delete.py
@@ -36,6 +36,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Schedule/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Schedule/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/get.py
index 7772e8800fa9..413a44c3fb0d 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/get.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Schedule/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Schedule/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/list.py
index 0610eab1d68d..f15fdebd72d6 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/list.py
@@ -37,6 +37,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Schedule/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Schedule/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/usage/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/usage/list.py
index ccff07e2fa36..c69568cd2c73 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/usage/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/usage/list.py
@@ -36,6 +36,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Usage/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Usage/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/virtual_machine_size/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/virtual_machine_size/list.py
index e84b655dbd1e..28d5f3e62900 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/virtual_machine_size/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/virtual_machine_size/list.py
@@ -35,6 +35,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/VirtualMachineSize/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/VirtualMachineSize/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/create_or_update.py
index 64b1e00ff40c..9d0fe1181c17 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/create_or_update.py
@@ -66,6 +66,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/BatchDeployment/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/BatchDeployment/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/delete.py
index e4e9b5706e42..f7a68ff86600 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/delete.py
@@ -37,6 +37,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/BatchDeployment/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/BatchDeployment/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/get.py
index d1617e809898..666551ec3a06 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/get.py
@@ -38,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/BatchDeployment/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/BatchDeployment/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/list.py
index f1242ba35f7e..b161f03b0297 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/list.py
@@ -38,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/BatchDeployment/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/BatchDeployment/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/update.py
index 102b3fe0734c..1b8ea83b0cce 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/update.py
@@ -39,6 +39,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/BatchDeployment/update.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/BatchDeployment/update.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/create_or_update.py
index 2074040353b4..9c9547d8c250 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/create_or_update.py
@@ -50,6 +50,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/BatchEndpoint/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/BatchEndpoint/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/delete.py
index 80701cf2af60..7d2f9f165a76 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/delete.py
@@ -36,6 +36,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/BatchEndpoint/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/BatchEndpoint/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/get.py
index 990f55858aee..76c48e8ee8ae 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/get.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/BatchEndpoint/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/BatchEndpoint/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/list.py
index e3e3fe7fd1dc..555a91186bab 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/list.py
@@ -37,6 +37,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/BatchEndpoint/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/BatchEndpoint/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/list_keys.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/list_keys.py
index cc96bcdff698..f26ddfd8b99e 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/list_keys.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/list_keys.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/BatchEndpoint/listKeys.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/BatchEndpoint/listKeys.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/update.py
index f6725836e685..1ed9584aab08 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/update.py
@@ -38,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/BatchEndpoint/update.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/BatchEndpoint/update.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/create_or_update.py
index af061e818d47..59543aedeb8f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/create_or_update.py
@@ -38,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/CodeContainer/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/CodeContainer/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/delete.py
index 7da2cb51b90a..f8db39f78db5 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/delete.py
@@ -36,6 +36,6 @@ def main():
)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/CodeContainer/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/CodeContainer/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/get.py
index f56a7a87e660..a8455f5f69b4 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/get.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/CodeContainer/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/CodeContainer/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/list.py
index 001ce2be8571..2ef2f9d1d2e1 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/list.py
@@ -37,6 +37,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/CodeContainer/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/CodeContainer/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/create_or_get_start_pending_upload.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/create_or_get_start_pending_upload.py
index cea171e25bfb..626c08f0a40f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/create_or_get_start_pending_upload.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/create_or_get_start_pending_upload.py
@@ -34,11 +34,11 @@ def main():
workspace_name="my-aml-workspace",
name="string",
version="string",
- body={"pendingUploadId": "string", "pendingUploadType": "None"},
+ body={"pendingUploadId": "string", "pendingUploadType": "TemporaryBlobReference"},
)
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/CodeVersion/createOrGetStartPendingUpload.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/CodeVersion/createOrGetStartPendingUpload.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/create_or_update.py
index b894f4bda5f4..2ae3ae2d3e77 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/create_or_update.py
@@ -47,6 +47,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/CodeVersion/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/CodeVersion/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/delete.py
index c78c15a8ed47..d0e90d3aee79 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/delete.py
@@ -37,6 +37,6 @@ def main():
)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/CodeVersion/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/CodeVersion/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/get.py
index ecae464c7718..efffab4c0560 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/get.py
@@ -38,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/CodeVersion/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/CodeVersion/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/list.py
index 52a9742de543..fec690679f16 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/list.py
@@ -38,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/CodeVersion/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/CodeVersion/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/create_or_update.py
index 1c6f64c469e1..6c342242a4d3 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/create_or_update.py
@@ -40,6 +40,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ComponentContainer/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/ComponentContainer/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/delete.py
index 8826100a38e4..ce5b947b7eca 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/delete.py
@@ -36,6 +36,6 @@ def main():
)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ComponentContainer/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/ComponentContainer/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/get.py
index 3954753f23ad..94970b11085b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/get.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ComponentContainer/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/ComponentContainer/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/list.py
index 45d349e330cd..815dd337b15e 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/list.py
@@ -37,6 +37,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ComponentContainer/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/ComponentContainer/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/create_or_update.py
index 724e7b3e919a..322353c15c10 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/create_or_update.py
@@ -47,6 +47,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ComponentVersion/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/ComponentVersion/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/delete.py
index e3fe021ae631..f7d7825a8434 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/delete.py
@@ -37,6 +37,6 @@ def main():
)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ComponentVersion/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/ComponentVersion/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/get.py
index 5d23ed45e902..83946cd9bd57 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/get.py
@@ -38,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ComponentVersion/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/ComponentVersion/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/list.py
index aaecb70ad47d..1cb3d904e86d 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/list.py
@@ -38,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ComponentVersion/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/ComponentVersion/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/create.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/create.py
index cd04a676e0e1..8d9a315c9756 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/create.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/create.py
@@ -32,7 +32,7 @@ def main():
response = client.workspaces.begin_create_or_update(
resource_group_name="workspace-1234",
workspace_name="testworkspace",
- parameters={
+ body={
"identity": {
"type": "SystemAssigned,UserAssigned",
"userAssignedIdentities": {
@@ -76,6 +76,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/create.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/create.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/create_or_update.py
index 98d366c715e9..8225fd3fe065 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/create_or_update.py
@@ -45,6 +45,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/DataContainer/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/DataContainer/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/delete.py
index a135c9b5d424..be6869368331 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/delete.py
@@ -36,6 +36,6 @@ def main():
)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/DataContainer/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/DataContainer/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/get.py
index 53ef7b99be97..4819af84f772 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/get.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/DataContainer/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/DataContainer/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/list.py
index 3d48472b6ceb..c65334176228 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/list.py
@@ -37,6 +37,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/DataContainer/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/DataContainer/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/create_or_update.py
index 8705b06141a3..d15fce49b3cc 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/create_or_update.py
@@ -48,6 +48,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/DataVersionBase/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/DataVersionBase/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/delete.py
index 4b7c673a5232..86628998f174 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/delete.py
@@ -37,6 +37,6 @@ def main():
)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/DataVersionBase/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/DataVersionBase/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/get.py
index d0e194b55997..ac3fed924f9f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/get.py
@@ -38,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/DataVersionBase/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/DataVersionBase/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/list.py
index 72bb1bc4b0e0..0ec5e7b8ac7f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/list.py
@@ -38,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/DataVersionBase/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/DataVersionBase/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/delete.py
index 205bca6f2e81..8fb3ec7a41d2 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/delete.py
@@ -35,6 +35,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/diagnose.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/diagnose.py
index 3077375cd454..59f67c9be995 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/diagnose.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/diagnose.py
@@ -36,6 +36,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/diagnose.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/diagnose.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_container/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_container/create_or_update.py
new file mode 100644
index 000000000000..ea1e5ca62d24
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_container/create_or_update.py
@@ -0,0 +1,50 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python create_or_update.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.featureset_containers.begin_create_or_update(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ body={
+ "properties": {
+ "description": "string",
+ "isArchived": False,
+ "properties": {"string": "string"},
+ "tags": {"string": "string"},
+ }
+ },
+ ).result()
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/FeaturesetContainer/createOrUpdate.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_container/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_container/delete.py
new file mode 100644
index 000000000000..d71e795f0e67
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_container/delete.py
@@ -0,0 +1,41 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python delete.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ client.featureset_containers.begin_delete(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ ).result()
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/FeaturesetContainer/delete.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_container/get_entity.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_container/get_entity.py
new file mode 100644
index 000000000000..a102551024dc
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_container/get_entity.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python get_entity.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.featureset_containers.get_entity(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/FeaturesetContainer/getEntity.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_container/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_container/list.py
new file mode 100644
index 000000000000..149ab6286a11
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_container/list.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python list.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.featureset_containers.list(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ )
+ for item in response:
+ print(item)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/FeaturesetContainer/list.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/backfill.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/backfill.py
new file mode 100644
index 000000000000..d2c0100a7a40
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/backfill.py
@@ -0,0 +1,56 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python backfill.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.featureset_versions.begin_backfill(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ version="string",
+ body={
+ "dataAvailabilityStatus": ["None"],
+ "description": "string",
+ "displayName": "string",
+ "featureWindow": {
+ "featureWindowEnd": "2020-01-01T12:34:56.999+00:51",
+ "featureWindowStart": "2020-01-01T12:34:56.999+00:51",
+ },
+ "jobId": "string",
+ "resource": {"instanceType": "string"},
+ "sparkConfiguration": {"string": "string"},
+ "tags": {"string": "string"},
+ },
+ ).result()
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/FeaturesetVersion/backfill.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/create_or_update.py
new file mode 100644
index 000000000000..04837807bd0a
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/create_or_update.py
@@ -0,0 +1,70 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python create_or_update.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.featureset_versions.begin_create_or_update(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ version="string",
+ body={
+ "properties": {
+ "description": "string",
+ "entities": ["string"],
+ "isAnonymous": False,
+ "isArchived": False,
+ "materializationSettings": {
+ "notification": {"emailOn": ["JobFailed"], "emails": ["string"]},
+ "resource": {"instanceType": "string"},
+ "schedule": {
+ "endTime": "string",
+ "frequency": "Day",
+ "interval": 1,
+ "schedule": {"hours": [1], "minutes": [1], "monthDays": [1], "weekDays": ["Monday"]},
+ "startTime": "string",
+ "timeZone": "string",
+ "triggerType": "Recurrence",
+ },
+ "sparkConfiguration": {"string": "string"},
+ "storeType": "Online",
+ },
+ "properties": {"string": "string"},
+ "specification": {"path": "string"},
+ "stage": "string",
+ "tags": {"string": "string"},
+ }
+ },
+ ).result()
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/FeaturesetVersion/createOrUpdate.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/delete.py
new file mode 100644
index 000000000000..675ab8510fb5
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/delete.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python delete.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ client.featureset_versions.begin_delete(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ version="string",
+ ).result()
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/FeaturesetVersion/delete.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/get.py
new file mode 100644
index 000000000000..982d564710b8
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/get.py
@@ -0,0 +1,43 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python get.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.featureset_versions.get(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ version="string",
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/FeaturesetVersion/get.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/list.py
new file mode 100644
index 000000000000..2119a6f8e5b6
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/list.py
@@ -0,0 +1,43 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python list.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.featureset_versions.list(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ )
+ for item in response:
+ print(item)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/FeaturesetVersion/list.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_container/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_container/create_or_update.py
new file mode 100644
index 000000000000..e34358e8f3be
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_container/create_or_update.py
@@ -0,0 +1,50 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python create_or_update.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.featurestore_entity_containers.begin_create_or_update(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ body={
+ "properties": {
+ "description": "string",
+ "isArchived": False,
+ "properties": {"string": "string"},
+ "tags": {"string": "string"},
+ }
+ },
+ ).result()
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/FeaturestoreEntityContainer/createOrUpdate.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_container/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_container/delete.py
new file mode 100644
index 000000000000..89c2850c48e8
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_container/delete.py
@@ -0,0 +1,41 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python delete.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ client.featurestore_entity_containers.begin_delete(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ ).result()
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/FeaturestoreEntityContainer/delete.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_container/get_entity.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_container/get_entity.py
new file mode 100644
index 000000000000..da14088ac564
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_container/get_entity.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python get_entity.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.featurestore_entity_containers.get_entity(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/FeaturestoreEntityContainer/getEntity.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_container/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_container/list.py
new file mode 100644
index 000000000000..bda03debb692
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_container/list.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python list.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.featurestore_entity_containers.list(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ )
+ for item in response:
+ print(item)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/FeaturestoreEntityContainer/list.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_version/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_version/create_or_update.py
new file mode 100644
index 000000000000..725244f9e683
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_version/create_or_update.py
@@ -0,0 +1,53 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python create_or_update.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.featurestore_entity_versions.begin_create_or_update(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ version="string",
+ body={
+ "properties": {
+ "description": "string",
+ "indexColumns": [{"columnName": "string", "dataType": "Datetime"}],
+ "isAnonymous": False,
+ "isArchived": False,
+ "properties": {"string": "string"},
+ "tags": {"string": "string"},
+ }
+ },
+ ).result()
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/FeaturestoreEntityVersion/createOrUpdate.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_version/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_version/delete.py
new file mode 100644
index 000000000000..76562bb88f1b
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_version/delete.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python delete.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ client.featurestore_entity_versions.begin_delete(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ version="string",
+ ).result()
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/FeaturestoreEntityVersion/delete.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_version/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_version/get.py
new file mode 100644
index 000000000000..1a79454327c8
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_version/get.py
@@ -0,0 +1,43 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python get.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.featurestore_entity_versions.get(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ version="string",
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/FeaturestoreEntityVersion/get.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_version/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_version/list.py
new file mode 100644
index 000000000000..def5c900c951
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_version/list.py
@@ -0,0 +1,43 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python list.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.featurestore_entity_versions.list(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ )
+ for item in response:
+ print(item)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/FeaturestoreEntityVersion/list.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/get.py
index 4c2f399080e3..f7d5bd9a711f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/get.py
@@ -36,6 +36,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_by_resource_group.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_by_resource_group.py
index 48ad1779487f..19e092892b27 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_by_resource_group.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_by_resource_group.py
@@ -36,6 +36,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/listByResourceGroup.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/listByResourceGroup.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_by_subscription.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_by_subscription.py
index 921bfb2b59f6..850b52f9f736 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_by_subscription.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_by_subscription.py
@@ -34,6 +34,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/listBySubscription.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/listBySubscription.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_keys.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_keys.py
index 9e0e183e60ef..7e0a2e11beee 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_keys.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_keys.py
@@ -36,6 +36,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/listKeys.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/listKeys.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_notebook_access_token.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_notebook_access_token.py
index d180f3ee30c2..9bfeb626f21d 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_notebook_access_token.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_notebook_access_token.py
@@ -36,6 +36,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/listNotebookAccessToken.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/listNotebookAccessToken.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_storage_account_keys.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_storage_account_keys.py
index dbc963513d84..d4f3f0fcfa09 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_storage_account_keys.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_storage_account_keys.py
@@ -36,6 +36,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/listStorageAccountKeys.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/listStorageAccountKeys.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/create_or_update.py
index 3c3a3ce3aaf6..968e2bbc8bcb 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/create_or_update.py
@@ -40,6 +40,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ModelContainer/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/ModelContainer/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/delete.py
index 7a5a7947d5a4..dc77d70ba40f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/delete.py
@@ -36,6 +36,6 @@ def main():
)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ModelContainer/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/ModelContainer/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/get.py
index 9a5d2fcc742a..f47ce1afeca8 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/get.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ModelContainer/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/ModelContainer/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/list.py
index 988fdad83e14..2e1a8dc08add 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/list.py
@@ -37,6 +37,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ModelContainer/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/ModelContainer/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/create_or_update.py
index 0d9996be1fbb..63da78d8188a 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/create_or_update.py
@@ -49,6 +49,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ModelVersion/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/ModelVersion/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/delete.py
index ddf28eee5525..00c293b75c14 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/delete.py
@@ -37,6 +37,6 @@ def main():
)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ModelVersion/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/ModelVersion/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/get.py
index 3322b8491634..bc99f97de710 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/get.py
@@ -38,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ModelVersion/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/ModelVersion/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/list.py
index b6fe6cc09d99..8e28a98e2038 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/list.py
@@ -38,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ModelVersion/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/ModelVersion/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/package.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/package.py
new file mode 100644
index 000000000000..e25a8b42e505
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/package.py
@@ -0,0 +1,62 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python package.py
+
+    Before running the sample, please set the values of the client ID, tenant ID, and client
+    secret of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more information about how to obtain these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.model_versions.begin_package(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ version="string",
+ body={
+ "baseEnvironmentSource": {"baseEnvironmentSourceType": "EnvironmentAsset", "resourceId": "string"},
+ "environmentVariables": {"string": "string"},
+ "inferencingServer": {
+ "codeConfiguration": {"codeId": "string", "scoringScript": "string"},
+ "serverType": "AzureMLBatch",
+ },
+ "inputs": [
+ {
+ "inputType": "UriFile",
+ "mode": "Download",
+ "mountPath": "string",
+ "path": {"inputPathType": "Url", "url": "string"},
+ }
+ ],
+ "modelConfiguration": {"mode": "ReadOnlyMount", "mountPath": "string"},
+ "tags": {"string": "string"},
+ "targetEnvironmentId": "string",
+ },
+ ).result()
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/ModelVersion/package.json
+if __name__ == "__main__":
+ main()
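
The generated sample above blocks on ``.result()``. As a minimal sketch under the assumption that the standard azure-core LROPoller interface (``done()``, ``status()``, ``result()``) applies here, the same packaging operation can be polled without blocking; the request body is trimmed from the sample above and every concrete value is a placeholder.

# Sketch: poll the model-packaging LRO without blocking on .result().
# Client construction and body fields mirror the generated sample above;
# all resource names and body values are placeholders.
import time

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient


def package_without_blocking() -> None:
    client = MachineLearningServicesMgmtClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-1111-2222-3333-444444444444",
    )
    poller = client.model_versions.begin_package(
        resource_group_name="test-rg",
        workspace_name="my-aml-workspace",
        name="string",
        version="string",
        body={
            "inferencingServer": {"serverType": "AzureMLBatch"},
            "targetEnvironmentId": "string",
        },
    )
    # Check the operation status periodically instead of blocking.
    while not poller.done():
        print("packaging status:", poller.status())
        time.sleep(10)
    print(poller.result())
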
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_deployment/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_deployment/delete.py
index 209f1dc99b4c..6fe7b2d0ef8b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_deployment/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_deployment/delete.py
@@ -37,6 +37,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/OnlineDeployment/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/OnlineDeployment/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/create_or_update.py
index 0ad2062a3eef..a95de5ca05b0 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/create_or_update.py
@@ -51,6 +51,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/OnlineEndpoint/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/OnlineEndpoint/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/delete.py
index c00b39bfcf47..4d6ec5ffcffe 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/delete.py
@@ -36,6 +36,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/OnlineEndpoint/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/OnlineEndpoint/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/get.py
index c22efccb2c1e..ab1e1d689b9f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/get.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/OnlineEndpoint/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/OnlineEndpoint/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/get_token.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/get_token.py
index f721177eb41c..67eed0d75ac5 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/get_token.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/get_token.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/OnlineEndpoint/getToken.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/OnlineEndpoint/getToken.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/list.py
index 741567b8ceed..fec612712f56 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/list.py
@@ -37,6 +37,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/OnlineEndpoint/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/OnlineEndpoint/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/list_keys.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/list_keys.py
index 550945711c4b..a9f6937a74c7 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/list_keys.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/list_keys.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/OnlineEndpoint/listKeys.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/OnlineEndpoint/listKeys.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/regenerate_keys.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/regenerate_keys.py
index f6f874cb710c..d7817b93f345 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/regenerate_keys.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/regenerate_keys.py
@@ -37,6 +37,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/OnlineEndpoint/regenerateKeys.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/OnlineEndpoint/regenerateKeys.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/update.py
index b1c25f72b06b..a2a780f5d736 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/update.py
@@ -38,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/OnlineEndpoint/update.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/OnlineEndpoint/update.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/operations_list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/operations_list.py
index 91510c53d5b5..59db37497ee1 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/operations_list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/operations_list.py
@@ -34,6 +34,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/operationsList.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/operationsList.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/resync_keys.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/resync_keys.py
index f332a4120926..9610b8503e02 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/resync_keys.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/resync_keys.py
@@ -35,6 +35,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/resyncKeys.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/resyncKeys.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/create_or_update.py
new file mode 100644
index 000000000000..c70d357366d8
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/create_or_update.py
@@ -0,0 +1,54 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python create_or_update.py
+
+    Before running the sample, please set the values of the client ID, tenant ID, and client
+    secret of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more information about how to obtain these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.serverless_endpoints.begin_create_or_update(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ body={
+ "identity": {"type": "UserAssigned", "userAssignedIdentities": {"string": {}}},
+ "kind": "string",
+ "location": "string",
+ "properties": {
+ "authMode": "AAD",
+ "capacityReservation": {"capacityReservationGroupId": "string", "endpointReservedCapacity": 1},
+ "offer": {"offerName": "string", "publisher": "string"},
+ },
+ "sku": {"capacity": 1, "family": "string", "name": "string", "size": "string", "tier": "Standard"},
+ "tags": {},
+ },
+ ).result()
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/ServerlessEndpoint/createOrUpdate.json
+if __name__ == "__main__":
+ main()
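
For comparison, here is a hedged sketch of the same serverless endpoint creation against the async client, assuming the package ships the usual ``azure.mgmt.machinelearningservices.aio`` mirror of the sync surface. The body is trimmed to a subset of the fields shown in the generated sample above, and all values are placeholders; it is not asserted that this subset satisfies service-side validation.

# Sketch: async variant of the serverless endpoint create-or-update call.
# Assumes the .aio client mirrors the sync operations; values are placeholders.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def create_serverless_endpoint_async() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(
            credential=credential,
            subscription_id="00000000-1111-2222-3333-444444444444",
        ) as client:
            poller = await client.serverless_endpoints.begin_create_or_update(
                resource_group_name="test-rg",
                workspace_name="my-aml-workspace",
                name="string",
                body={
                    "location": "string",
                    "properties": {
                        "authMode": "AAD",
                        "offer": {"offerName": "string", "publisher": "string"},
                    },
                    "sku": {"name": "string", "tier": "Standard"},
                },
            )
            endpoint = await poller.result()
            print(endpoint)


if __name__ == "__main__":
    asyncio.run(create_serverless_endpoint_async())
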
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/delete.py
new file mode 100644
index 000000000000..56a8fa2cda23
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/delete.py
@@ -0,0 +1,41 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python delete.py
+
+    Before running the sample, please set the values of the client ID, tenant ID, and client
+    secret of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more information about how to obtain these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ client.serverless_endpoints.begin_delete(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ ).result()
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/ServerlessEndpoint/delete.json
+if __name__ == "__main__":
+ main()
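
A slightly more defensive variant of the delete sample is sketched below. It assumes, and the generated code does not confirm, that deleting a non-existent endpoint surfaces as a 404 raised as ``azure.core.exceptions.ResourceNotFoundError``; names are placeholders.

# Sketch: tolerate a missing endpoint when deleting. The 404 ->
# ResourceNotFoundError behavior is an assumption, not documented here.
from azure.core.exceptions import ResourceNotFoundError
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient


def delete_if_exists(resource_group: str, workspace: str, endpoint_name: str) -> None:
    client = MachineLearningServicesMgmtClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-1111-2222-3333-444444444444",
    )
    try:
        client.serverless_endpoints.begin_delete(
            resource_group_name=resource_group,
            workspace_name=workspace,
            name=endpoint_name,
        ).result()
    except ResourceNotFoundError:
        # Nothing to delete; treat as success.
        print(f"Endpoint {endpoint_name!r} was not found, skipping delete.")
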
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/get.py
new file mode 100644
index 000000000000..b217efd37f85
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/get.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python get.py
+
+    Before running the sample, please set the values of the client ID, tenant ID, and client
+    secret of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more information about how to obtain these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.serverless_endpoints.get(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/ServerlessEndpoint/get.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/get_status.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/get_status.py
new file mode 100644
index 000000000000..4889cf5ad484
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/get_status.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python get_status.py
+
+    Before running the sample, please set the values of the client ID, tenant ID, and client
+    secret of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more information about how to obtain these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.serverless_endpoints.get_status(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/ServerlessEndpoint/getStatus.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/list.py
new file mode 100644
index 000000000000..04dfb28cc630
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/list.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python list.py
+
+    Before running the sample, please set the values of the client ID, tenant ID, and client
+    secret of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more information about how to obtain these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.serverless_endpoints.list(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ )
+ for item in response:
+ print(item)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/ServerlessEndpoint/list.json
+if __name__ == "__main__":
+ main()
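
The list operation returns the standard azure-core paged iterator, so large result sets can also be consumed page by page via ``by_page()``; the sketch below reuses the client construction from the sample above with placeholder names.

# Sketch: consume the serverless endpoint listing page by page.
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient


def list_endpoints_by_page() -> None:
    client = MachineLearningServicesMgmtClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-1111-2222-3333-444444444444",
    )
    pages = client.serverless_endpoints.list(
        resource_group_name="test-rg",
        workspace_name="my-aml-workspace",
    ).by_page()
    for page_number, page in enumerate(pages, start=1):
        print(f"--- page {page_number} ---")
        for endpoint in page:
            print(endpoint.name)
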
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/list_keys.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/list_keys.py
new file mode 100644
index 000000000000..cb5e400eb01d
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/list_keys.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python list_keys.py
+
+    Before running the sample, please set the values of the client ID, tenant ID, and client
+    secret of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more information about how to obtain these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.serverless_endpoints.list_keys(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/ServerlessEndpoint/listKeys.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/regenerate_keys.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/regenerate_keys.py
new file mode 100644
index 000000000000..4ae7d927dca3
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/regenerate_keys.py
@@ -0,0 +1,43 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python regenerate_keys.py
+
+    Before running the sample, please set the values of the client ID, tenant ID, and client
+    secret of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more information about how to obtain these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.serverless_endpoints.begin_regenerate_keys(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ body={"keyType": "Primary", "keyValue": "string"},
+ ).result()
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/ServerlessEndpoint/regenerateKeys.json
+if __name__ == "__main__":
+ main()
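
A typical key-rotation flow follows the regenerate call with ``list_keys`` to pick up the new value. The sketch below simply chains the two generated operations shown in this diff; it does not inspect the shape of the listKeys response, and all names and body values are placeholders.

# Sketch: rotate the primary key of a serverless endpoint, then read back the keys.
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient


def rotate_primary_key(resource_group: str, workspace: str, endpoint_name: str) -> None:
    client = MachineLearningServicesMgmtClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-1111-2222-3333-444444444444",
    )
    # Regenerate the primary key and wait for the long-running operation.
    client.serverless_endpoints.begin_regenerate_keys(
        resource_group_name=resource_group,
        workspace_name=workspace,
        name=endpoint_name,
        body={"keyType": "Primary", "keyValue": "string"},
    ).result()
    # Read back the keys after rotation.
    keys = client.serverless_endpoints.list_keys(
        resource_group_name=resource_group,
        workspace_name=workspace,
        name=endpoint_name,
    )
    print(keys)
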
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/update.py
new file mode 100644
index 000000000000..3d6b5f8925db
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/update.py
@@ -0,0 +1,47 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python update.py
+
+    Before running the sample, please set the values of the client ID, tenant ID, and client
+    secret of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more information about how to obtain these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.serverless_endpoints.begin_update(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ body={
+ "identity": {"type": "None", "userAssignedIdentities": {"string": {}}},
+ "sku": {"capacity": 1, "family": "string", "name": "string", "size": "string", "tier": "Basic"},
+ "tags": {},
+ },
+ ).result()
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/ServerlessEndpoint/update.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/update.py
index f8b3fae5c4cc..1bb4bc1c0e3e 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/update.py
@@ -32,7 +32,7 @@ def main():
response = client.workspaces.begin_update(
resource_group_name="workspace-1234",
workspace_name="testworkspace",
- parameters={
+ body={
"properties": {
"description": "new description",
"friendlyName": "New friendly name",
@@ -43,6 +43,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/update.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/Workspace/update.json
if __name__ == "__main__":
main()
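
The hunk above renames the ``begin_update`` keyword from ``parameters`` to ``body``. For callers written against the 2023-04-01 generation, the migration is the one-line change sketched below, using the same workspace names and payload that appear in the sample.

# Migration sketch for the keyword rename shown in the hunk above: the payload
# is unchanged, only the keyword passed to begin_update differs.
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-1111-2222-3333-444444444444",
)
update_payload = {"properties": {"description": "new description", "friendlyName": "New friendly name"}}

# Previous generation (2023-04-01):
#   client.workspaces.begin_update(..., parameters=update_payload).result()
# This generation (2023-08-01-preview):
response = client.workspaces.begin_update(
    resource_group_name="workspace-1234",
    workspace_name="testworkspace",
    body=update_payload,
).result()
print(response)
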
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/create.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/create.py
index 110d4862cc3a..00ebc35675c5 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/create.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/create.py
@@ -33,11 +33,10 @@ def main():
resource_group_name="resourceGroup-1",
workspace_name="workspace-1",
connection_name="connection-1",
- parameters={"properties": {"authType": "None", "category": "ContainerRegistry", "target": "www.facebook.com"}},
)
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/WorkspaceConnection/create.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/WorkspaceConnection/create.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/delete.py
index c154fbe498b6..b03762a45fa9 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/delete.py
@@ -36,6 +36,6 @@ def main():
)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/WorkspaceConnection/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/WorkspaceConnection/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/get.py
index 98aa005623e0..0da0a48fbe1f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/get.py
@@ -37,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/WorkspaceConnection/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/WorkspaceConnection/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/list.py
index 3f4299cf1809..575522b2c35d 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/list.py
@@ -37,6 +37,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/WorkspaceConnection/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/WorkspaceConnection/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/list_secrets.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/list_secrets.py
new file mode 100644
index 000000000000..36caad8e20b2
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/list_secrets.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python list_secrets.py
+
+    Before running the sample, please set the values of the client ID, tenant ID, and client
+    secret of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more information about how to obtain these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.workspace_connections.list_secrets(
+ resource_group_name="test-rg",
+ workspace_name="aml-workspace-name",
+ connection_name="some_string",
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/WorkspaceConnection/listSecrets.json
+if __name__ == "__main__":
+ main()
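
Combining the existing list sample with the new ``list_secrets`` operation gives a sketch that walks every connection in a workspace and fetches its secrets. It assumes ``list_secrets`` is valid for each connection type returned, which nothing in this diff states; names are placeholders.

# Sketch: enumerate workspace connections and fetch secrets for each one.
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient


def dump_connection_secrets(resource_group: str, workspace: str) -> None:
    client = MachineLearningServicesMgmtClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-1111-2222-3333-444444444444",
    )
    for connection in client.workspace_connections.list(
        resource_group_name=resource_group,
        workspace_name=workspace,
    ):
        secrets = client.workspace_connections.list_secrets(
            resource_group_name=resource_group,
            workspace_name=workspace,
            connection_name=connection.name,
        )
        print(connection.name, secrets)
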
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/test_connection.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/test_connection.py
new file mode 100644
index 000000000000..0eb83c389af3
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/test_connection.py
@@ -0,0 +1,41 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python test_connection.py
+
+    Before running the sample, please set the values of the client ID, tenant ID, and client
+    secret of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more information about how to obtain these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ client.workspace_connections.begin_test_connection(
+ resource_group_name="resourceGroup-1",
+ workspace_name="workspace-1",
+ connection_name="connection-1",
+ ).result()
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/WorkspaceConnection/testConnection.json
+if __name__ == "__main__":
+ main()
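
A more defensive variant of the connectivity check is sketched below. It assumes, without confirmation from the generated sample, that a failed test surfaces as an ``HttpResponseError`` when the poller result is requested; names are placeholders.

# Sketch: report the outcome of a connection test instead of letting the
# exception propagate. The failure mode assumed here is not documented above.
from azure.core.exceptions import HttpResponseError
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient


def test_connection_safely(resource_group: str, workspace: str, connection_name: str) -> bool:
    client = MachineLearningServicesMgmtClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-1111-2222-3333-444444444444",
    )
    try:
        client.workspace_connections.begin_test_connection(
            resource_group_name=resource_group,
            workspace_name=workspace,
            connection_name=connection_name,
        ).result()
    except HttpResponseError as exc:
        print(f"Connection test for {connection_name!r} failed: {exc.message}")
        return False
    print(f"Connection test for {connection_name!r} succeeded.")
    return True
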
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/update.py
new file mode 100644
index 000000000000..024ff2266ce4
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/update.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python update.py
+
+    Before running the sample, please set the values of the client ID, tenant ID, and client
+    secret of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more information about how to obtain these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.workspace_connections.update(
+ resource_group_name="test-rg",
+ workspace_name="aml-workspace-name",
+ connection_name="some_string",
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/WorkspaceConnection/update.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_feature/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_feature/list.py
index 3f1b7fa8da64..2b17054878e5 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_feature/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_feature/list.py
@@ -37,6 +37,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/WorkspaceFeature/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/preview/2023-08-01-preview/examples/WorkspaceFeature/list.json
if __name__ == "__main__":
main()