[AutoPR datafactory/resource-manager] Add AzureDatabricks Linked Service and DatabricksNotebook Activity (#2081)
* Generated from 2a25328ea6d9c2c7e49a1ecd3ed07aaecddd0c54: fix a description
* Generated from c1cfc35403764b249f3998e4a4df37ae98d9b824: remove additional ,
1 parent a717dbd · commit 8cf0dc0
Showing 20 changed files with 576 additions and 34 deletions.
99 changes: 99 additions & 0 deletions
azure-mgmt-datafactory/azure/mgmt/datafactory/models/azure_databricks_linked_service.py
@@ -0,0 +1,99 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .linked_service import LinkedService


class AzureDatabricksLinkedService(LinkedService):
    """Azure Databricks linked service.

    :param additional_properties: Unmatched properties from the message are
     deserialized into this collection
    :type additional_properties: dict[str, object]
    :param connect_via: The integration runtime reference.
    :type connect_via:
     ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
    :param description: Linked service description.
    :type description: str
    :param parameters: Parameters for linked service.
    :type parameters: dict[str,
     ~azure.mgmt.datafactory.models.ParameterSpecification]
    :param annotations: List of tags that can be used for describing the
     Dataset.
    :type annotations: list[object]
    :param type: Constant filled by server.
    :type type: str
    :param domain: <REGION>.azuredatabricks.net, domain name of your
     Databricks deployment. Type: string (or Expression with resultType
     string).
    :type domain: object
    :param access_token: Access token for the Databricks REST API. Refer to
     https://docs.azuredatabricks.net/api/latest/authentication.html. Type:
     string (or Expression with resultType string).
    :type access_token: ~azure.mgmt.datafactory.models.SecretBase
    :param existing_cluster_id: The id of an existing cluster that will be
     used for all runs of this job. Type: string (or Expression with
     resultType string).
    :type existing_cluster_id: object
    :param new_cluster_version: The Spark version of the new cluster. Type:
     string (or Expression with resultType string).
    :type new_cluster_version: object
    :param new_cluster_num_of_worker: Number of worker nodes that the new
     cluster should have. A string formatted Int32, like '1', means
     numOfWorker is 1, and '1:10' means auto-scale from 1 (min) to 10 (max).
     Type: string (or Expression with resultType string).
    :type new_cluster_num_of_worker: object
    :param new_cluster_node_type: The node type of the new cluster. Type:
     string (or Expression with resultType string).
    :type new_cluster_node_type: object
    :param new_cluster_spark_conf: A set of optional, user-specified Spark
     configuration key-value pairs.
    :type new_cluster_spark_conf: dict[str, object]
    :param encrypted_credential: The encrypted credential used for
     authentication. Credentials are encrypted using the integration runtime
     credential manager. Type: string (or Expression with resultType string).
    :type encrypted_credential: object
    """

    _validation = {
        'type': {'required': True},
        'domain': {'required': True},
        'access_token': {'required': True},
    }

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'connect_via': {'key': 'connectVia', 'type': 'IntegrationRuntimeReference'},
        'description': {'key': 'description', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': '{ParameterSpecification}'},
        'annotations': {'key': 'annotations', 'type': '[object]'},
        'type': {'key': 'type', 'type': 'str'},
        'domain': {'key': 'typeProperties.domain', 'type': 'object'},
        'access_token': {'key': 'typeProperties.accessToken', 'type': 'SecretBase'},
        'existing_cluster_id': {'key': 'typeProperties.existingClusterId', 'type': 'object'},
        'new_cluster_version': {'key': 'typeProperties.newClusterVersion', 'type': 'object'},
        'new_cluster_num_of_worker': {'key': 'typeProperties.newClusterNumOfWorker', 'type': 'object'},
        'new_cluster_node_type': {'key': 'typeProperties.newClusterNodeType', 'type': 'object'},
        'new_cluster_spark_conf': {'key': 'typeProperties.newClusterSparkConf', 'type': '{object}'},
        'encrypted_credential': {'key': 'typeProperties.encryptedCredential', 'type': 'object'},
    }

    def __init__(self, domain, access_token, additional_properties=None, connect_via=None, description=None, parameters=None, annotations=None, existing_cluster_id=None, new_cluster_version=None, new_cluster_num_of_worker=None, new_cluster_node_type=None, new_cluster_spark_conf=None, encrypted_credential=None):
        super(AzureDatabricksLinkedService, self).__init__(additional_properties=additional_properties, connect_via=connect_via, description=description, parameters=parameters, annotations=annotations)
        self.domain = domain
        self.access_token = access_token
        self.existing_cluster_id = existing_cluster_id
        self.new_cluster_version = new_cluster_version
        self.new_cluster_num_of_worker = new_cluster_num_of_worker
        self.new_cluster_node_type = new_cluster_node_type
        self.new_cluster_spark_conf = new_cluster_spark_conf
        self.encrypted_credential = encrypted_credential
        self.type = 'AzureDatabricks'
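A minimal usage sketch (not part of the commit) of how this new model could be constructed. The workspace domain, access token, and cluster settings below are placeholder values, and SecureString is the SDK's existing plain-text SecretBase implementation:

from azure.mgmt.datafactory.models import (
    AzureDatabricksLinkedService,
    SecureString,
)

# Placeholder values throughout; in real use, read the personal access
# token from a secure store rather than hard-coding it.
databricks_ls = AzureDatabricksLinkedService(
    domain='https://eastus.azuredatabricks.net',
    access_token=SecureString(value='<databricks-personal-access-token>'),
    new_cluster_version='4.0.x-scala2.11',   # runtime version (placeholder)
    new_cluster_num_of_worker='1:4',         # auto-scale from 1 to 4 workers
    new_cluster_node_type='Standard_D3_v2',  # worker VM size (placeholder)
)

Note that the dotted `typeProperties.*` keys in `_attribute_map` tell the msrest serializer to nest these flat constructor arguments under a `typeProperties` object in the JSON payload.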
66 changes: 66 additions & 0 deletions
azure-mgmt-datafactory/azure/mgmt/datafactory/models/databricks_notebook_activity.py
@@ -0,0 +1,66 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .execution_activity import ExecutionActivity


class DatabricksNotebookActivity(ExecutionActivity):
    """DatabricksNotebook activity.

    :param additional_properties: Unmatched properties from the message are
     deserialized into this collection
    :type additional_properties: dict[str, object]
    :param name: Activity name.
    :type name: str
    :param description: Activity description.
    :type description: str
    :param depends_on: Activity depends on condition.
    :type depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
    :param type: Constant filled by server.
    :type type: str
    :param linked_service_name: Linked service reference.
    :type linked_service_name:
     ~azure.mgmt.datafactory.models.LinkedServiceReference
    :param policy: Activity policy.
    :type policy: ~azure.mgmt.datafactory.models.ActivityPolicy
    :param notebook_path: The absolute path of the notebook to be run in the
     Databricks Workspace. This path must begin with a slash. Type: string
     (or Expression with resultType string).
    :type notebook_path: object
    :param base_parameters: Base parameters to be used for each run of this
     job. If the notebook takes a parameter that is not specified, the
     default value from the notebook will be used.
    :type base_parameters: dict[str, object]
    """

    _validation = {
        'name': {'required': True},
        'type': {'required': True},
        'notebook_path': {'required': True},
    }

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'name': {'key': 'name', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'depends_on': {'key': 'dependsOn', 'type': '[ActivityDependency]'},
        'type': {'key': 'type', 'type': 'str'},
        'linked_service_name': {'key': 'linkedServiceName', 'type': 'LinkedServiceReference'},
        'policy': {'key': 'policy', 'type': 'ActivityPolicy'},
        'notebook_path': {'key': 'typeProperties.notebookPath', 'type': 'object'},
        'base_parameters': {'key': 'typeProperties.baseParameters', 'type': '{object}'},
    }

    def __init__(self, name, notebook_path, additional_properties=None, description=None, depends_on=None, linked_service_name=None, policy=None, base_parameters=None):
        super(DatabricksNotebookActivity, self).__init__(additional_properties=additional_properties, name=name, description=description, depends_on=depends_on, linked_service_name=linked_service_name, policy=policy)
        self.notebook_path = notebook_path
        self.base_parameters = base_parameters
        self.type = 'DatabricksNotebook'
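A matching sketch (again, not part of the commit) wiring the new activity to a Databricks linked service by name. The activity name, notebook path, linked service name, and base parameter below are placeholders:

from azure.mgmt.datafactory.models import (
    DatabricksNotebookActivity,
    LinkedServiceReference,
)

notebook_activity = DatabricksNotebookActivity(
    name='RunMyNotebook',                 # placeholder activity name
    notebook_path='/Shared/my-notebook',  # absolute path; must begin with a slash
    linked_service_name=LinkedServiceReference(
        reference_name='MyDatabricksLinkedService'),
    # Overrides the notebook's default widget value for this parameter.
    base_parameters={'input_date': '2018-05-01'},
)

The activity would then be added to a pipeline's activities list and deployed through the data factory client's pipelines.create_or_update operation.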