From 54550002536e324104de210d1d7a87205a01b763 Mon Sep 17 00:00:00 2001 From: pulumi-bot Date: Fri, 13 Dec 2024 14:33:30 +0000 Subject: [PATCH] make build_sdks --- sdk/dotnet/GetAwsAssumeRolePolicy.cs | 67 + sdk/dotnet/GetAwsBucketPolicy.cs | 39 + sdk/dotnet/GetAwsCrossAccountPolicy.cs | 34 + .../GetAwsUnityCatalogAssumeRolePolicy.cs | 53 + sdk/dotnet/GetAwsUnityCatalogPolicy.cs | 53 + sdk/dotnet/GetCatalog.cs | 53 + sdk/dotnet/GetCatalogs.cs | 38 + sdk/dotnet/GetCluster.cs | 39 + sdk/dotnet/GetClusterPolicy.cs | 33 + sdk/dotnet/GetClusters.cs | 55 + sdk/dotnet/GetCurrentConfig.cs | 51 + sdk/dotnet/GetCurrentMetastore.cs | 40 + sdk/dotnet/GetCurrentUser.cs | 8 + sdk/dotnet/GetDbfsFile.cs | 36 + sdk/dotnet/GetDbfsFilePaths.cs | 38 + sdk/dotnet/GetDirectory.cs | 26 + sdk/dotnet/GetExternalLocation.cs | 39 + sdk/dotnet/GetExternalLocations.cs | 36 + sdk/dotnet/GetFunctions.cs | 39 + sdk/dotnet/GetGroup.cs | 50 + sdk/dotnet/GetInstancePool.cs | 33 + sdk/dotnet/GetInstanceProfiles.cs | 27 + sdk/dotnet/GetJob.cs | 39 + sdk/dotnet/GetJobs.cs | 69 + sdk/dotnet/GetMetastore.cs | 57 + sdk/dotnet/GetMetastores.cs | 39 + sdk/dotnet/GetMlflowExperiment.cs | 8 + sdk/dotnet/GetMlflowModel.cs | 84 + sdk/dotnet/GetMlflowModels.cs | 27 + sdk/dotnet/GetMwsCredentials.cs | 42 + sdk/dotnet/GetMwsWorkspaces.cs | 38 + sdk/dotnet/GetNodeType.cs | 60 + sdk/dotnet/GetNotebook.cs | 27 + sdk/dotnet/GetNotebookPaths.cs | 27 + sdk/dotnet/GetNotificationDestinations.cs | 56 + sdk/dotnet/GetPipelines.cs | 83 + sdk/dotnet/GetRegisteredModel.cs | 34 + sdk/dotnet/GetSchema.cs | 54 + sdk/dotnet/GetSchemas.cs | 41 + sdk/dotnet/GetServicePrincipal.cs | 52 + sdk/dotnet/GetServicePrincipals.cs | 8 + sdk/dotnet/GetShare.cs | 38 + sdk/dotnet/GetShares.cs | 35 + sdk/dotnet/GetSparkVersion.cs | 60 + sdk/dotnet/GetSqlWarehouse.cs | 55 + sdk/dotnet/GetSqlWarehouses.cs | 53 + sdk/dotnet/GetStorageCredential.cs | 39 + sdk/dotnet/GetStorageCredentials.cs | 36 + sdk/dotnet/GetTable.cs | 54 + sdk/dotnet/GetTables.cs 
| 59 + sdk/dotnet/GetUser.cs | 53 + sdk/dotnet/GetViews.cs | 57 + sdk/dotnet/GetVolume.cs | 56 + sdk/dotnet/GetVolumes.cs | 45 + sdk/dotnet/GetZones.cs | 23 + sdk/dotnet/Utilities.cs | 7 + sdk/go/databricks/getAwsAssumeRolePolicy.go | 16 +- sdk/go/databricks/getAwsBucketPolicy.go | 16 +- sdk/go/databricks/getAwsCrossAccountPolicy.go | 16 +- .../getAwsUnityCatalogAssumeRolePolicy.go | 16 +- sdk/go/databricks/getAwsUnityCatalogPolicy.go | 16 +- sdk/go/databricks/getCatalog.go | 16 +- sdk/go/databricks/getCatalogs.go | 16 +- sdk/go/databricks/getCluster.go | 16 +- sdk/go/databricks/getClusterPolicy.go | 16 +- sdk/go/databricks/getClusters.go | 16 +- sdk/go/databricks/getCurrentConfig.go | 16 +- sdk/go/databricks/getCurrentMetastore.go | 16 +- sdk/go/databricks/getCurrentUser.go | 14 +- sdk/go/databricks/getDbfsFile.go | 16 +- sdk/go/databricks/getDbfsFilePaths.go | 16 +- sdk/go/databricks/getDirectory.go | 16 +- sdk/go/databricks/getExternalLocation.go | 16 +- sdk/go/databricks/getExternalLocations.go | 16 +- sdk/go/databricks/getFunctions.go | 16 +- sdk/go/databricks/getGroup.go | 16 +- sdk/go/databricks/getInstancePool.go | 16 +- sdk/go/databricks/getInstanceProfiles.go | 16 +- sdk/go/databricks/getJob.go | 16 +- sdk/go/databricks/getJobs.go | 16 +- sdk/go/databricks/getMetastore.go | 16 +- sdk/go/databricks/getMetastores.go | 16 +- sdk/go/databricks/getMlflowExperiment.go | 16 +- sdk/go/databricks/getMlflowModel.go | 16 +- sdk/go/databricks/getMlflowModels.go | 16 +- sdk/go/databricks/getMwsCredentials.go | 16 +- sdk/go/databricks/getMwsWorkspaces.go | 16 +- sdk/go/databricks/getNodeType.go | 16 +- sdk/go/databricks/getNotebook.go | 16 +- sdk/go/databricks/getNotebookPaths.go | 16 +- .../databricks/getNotificationDestinations.go | 16 +- sdk/go/databricks/getPipelines.go | 16 +- sdk/go/databricks/getRegisteredModel.go | 16 +- sdk/go/databricks/getSchema.go | 16 +- sdk/go/databricks/getSchemas.go | 16 +- sdk/go/databricks/getServicePrincipal.go | 16 +- 
sdk/go/databricks/getServicePrincipals.go | 16 +- sdk/go/databricks/getShare.go | 16 +- sdk/go/databricks/getShares.go | 16 +- sdk/go/databricks/getSparkVersion.go | 16 +- sdk/go/databricks/getSqlWarehouse.go | 16 +- sdk/go/databricks/getSqlWarehouses.go | 16 +- sdk/go/databricks/getStorageCredential.go | 16 +- sdk/go/databricks/getStorageCredentials.go | 16 +- sdk/go/databricks/getTable.go | 16 +- sdk/go/databricks/getTables.go | 16 +- sdk/go/databricks/getUser.go | 16 +- sdk/go/databricks/getViews.go | 16 +- sdk/go/databricks/getVolume.go | 16 +- sdk/go/databricks/getVolumes.go | 16 +- sdk/go/databricks/getZones.go | 16 +- sdk/java/build.gradle | 2 +- .../databricks/DatabricksFunctions.java | 4092 +++++++++++++++-- .../java/com/pulumi/databricks/Utilities.java | 31 +- sdk/nodejs/getCluster.ts | 4 +- sdk/nodejs/getSchema.ts | 4 +- sdk/nodejs/getSqlWarehouse.ts | 4 +- sdk/nodejs/getVolume.ts | 4 +- sdk/nodejs/package.json | 2 +- sdk/python/pyproject.toml | 4 +- 120 files changed, 6284 insertions(+), 1143 deletions(-) diff --git a/sdk/dotnet/GetAwsAssumeRolePolicy.cs b/sdk/dotnet/GetAwsAssumeRolePolicy.cs index a1e7ff7f4..5768b89e6 100644 --- a/sdk/dotnet/GetAwsAssumeRolePolicy.cs +++ b/sdk/dotnet/GetAwsAssumeRolePolicy.cs @@ -144,6 +144,73 @@ public static Task InvokeAsync(GetAwsAssumeRolePol /// public static Output Invoke(GetAwsAssumeRolePolicyInvokeArgs args, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getAwsAssumeRolePolicy:getAwsAssumeRolePolicy", args ?? new GetAwsAssumeRolePolicyInvokeArgs(), options.WithDefaults()); + + /// + /// This data source constructs necessary AWS STS assume role policy for you. 
+ /// + /// ## Example Usage + /// + /// End-to-end example of provisioning Cross-account IAM role with databricks.MwsCredentials and aws_iam_role: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Aws = Pulumi.Aws; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var config = new Config(); + /// // Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/ + /// var databricksAccountId = config.RequireObject<dynamic>("databricksAccountId"); + /// var @this = Databricks.GetAwsCrossAccountPolicy.Invoke(); + /// + /// var crossAccountPolicy = new Aws.Iam.Policy("cross_account_policy", new() + /// { + /// Name = $"{prefix}-crossaccount-iam-policy", + /// PolicyDocument = @this.Apply(@this => @this.Apply(getAwsCrossAccountPolicyResult => getAwsCrossAccountPolicyResult.Json)), + /// }); + /// + /// var thisGetAwsAssumeRolePolicy = Databricks.GetAwsAssumeRolePolicy.Invoke(new() + /// { + /// ExternalId = databricksAccountId, + /// }); + /// + /// var crossAccount = new Aws.Iam.Role("cross_account", new() + /// { + /// Name = $"{prefix}-crossaccount-iam-role", + /// AssumeRolePolicy = thisGetAwsAssumeRolePolicy.Apply(getAwsAssumeRolePolicyResult => getAwsAssumeRolePolicyResult.Json), + /// Description = "Grants Databricks full access to VPC resources", + /// }); + /// + /// var crossAccountRolePolicyAttachment = new Aws.Iam.RolePolicyAttachment("cross_account", new() + /// { + /// PolicyArn = crossAccountPolicy.Arn, + /// Role = crossAccount.Name, + /// }); + /// + /// // required only in case of multi-workspace setup + /// var thisMwsCredentials = new Databricks.MwsCredentials("this", new() + /// { + /// AccountId = databricksAccountId, + /// CredentialsName = $"{prefix}-creds", + /// RoleArn = crossAccount.Arn, + /// }); + /// + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following 
resources are used in the same context: + /// + /// * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide + /// * databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. + /// * databricks.getAwsCrossAccountPolicy data to construct the necessary AWS cross-account policy for you, which is based on [official documentation](https://docs.databricks.com/administration-guide/account-api/iam-role.html#language-Your%C2%A0VPC,%C2%A0default). + /// + public static Output Invoke(GetAwsAssumeRolePolicyInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getAwsAssumeRolePolicy:getAwsAssumeRolePolicy", args ?? new GetAwsAssumeRolePolicyInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetAwsBucketPolicy.cs b/sdk/dotnet/GetAwsBucketPolicy.cs index 556f8001f..defb3a29e 100644 --- a/sdk/dotnet/GetAwsBucketPolicy.cs +++ b/sdk/dotnet/GetAwsBucketPolicy.cs @@ -88,6 +88,45 @@ public static Task InvokeAsync(GetAwsBucketPolicyArgs /// public static Output Invoke(GetAwsBucketPolicyInvokeArgs args, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getAwsBucketPolicy:getAwsBucketPolicy", args ?? new GetAwsBucketPolicyInvokeArgs(), options.WithDefaults()); + + /// + /// This datasource configures a simple access policy for AWS S3 buckets, so that Databricks can access data in it. 
+ /// + /// ## Example Usage + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Aws = Pulumi.Aws; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var thisBucketV2 = new Aws.S3.BucketV2("this", new() + /// { + /// Bucket = "<unique_bucket_name>", + /// ForceDestroy = true, + /// }); + /// + /// var @this = Databricks.GetAwsBucketPolicy.Invoke(new() + /// { + /// Bucket = thisBucketV2.Bucket, + /// }); + /// + /// var thisBucketPolicy = new Aws.S3.BucketPolicy("this", new() + /// { + /// Bucket = thisBucketV2.Id, + /// Policy = @this.Apply(@this => @this.Apply(getAwsBucketPolicyResult => getAwsBucketPolicyResult.Json)), + /// }); + /// + /// }); + /// ``` + /// + /// Bucket policy with full access: + /// + public static Output Invoke(GetAwsBucketPolicyInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getAwsBucketPolicy:getAwsBucketPolicy", args ?? new GetAwsBucketPolicyInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetAwsCrossAccountPolicy.cs b/sdk/dotnet/GetAwsCrossAccountPolicy.cs index f77b61b9e..ee0626c9f 100644 --- a/sdk/dotnet/GetAwsCrossAccountPolicy.cs +++ b/sdk/dotnet/GetAwsCrossAccountPolicy.cs @@ -78,6 +78,40 @@ public static Task InvokeAsync(GetAwsCrossAccoun /// public static Output Invoke(GetAwsCrossAccountPolicyInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getAwsCrossAccountPolicy:getAwsCrossAccountPolicy", args ?? new GetAwsCrossAccountPolicyInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** This data source can only be used with an account-level provider! 
+ /// + /// This data source constructs necessary AWS cross-account policy for you, which is based on [official documentation](https://docs.databricks.com/administration-guide/account-api/iam-role.html#language-Your%C2%A0VPC,%C2%A0default). + /// + /// ## Example Usage + /// + /// For more detailed usage please see databricks.getAwsAssumeRolePolicy or databricks_aws_s3_mount pages. + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @this = Databricks.GetAwsCrossAccountPolicy.Invoke(); + /// + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide + /// * databricks.getAwsAssumeRolePolicy data to construct the necessary AWS STS assume role policy. + /// * databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. + /// * databricks.InstanceProfile to manage AWS EC2 instance profiles that users can launch databricks.Cluster and access data, like databricks_mount. + /// + public static Output Invoke(GetAwsCrossAccountPolicyInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getAwsCrossAccountPolicy:getAwsCrossAccountPolicy", args ?? 
new GetAwsCrossAccountPolicyInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetAwsUnityCatalogAssumeRolePolicy.cs b/sdk/dotnet/GetAwsUnityCatalogAssumeRolePolicy.cs index 176d852f8..6f7ffab5c 100644 --- a/sdk/dotnet/GetAwsUnityCatalogAssumeRolePolicy.cs +++ b/sdk/dotnet/GetAwsUnityCatalogAssumeRolePolicy.cs @@ -116,6 +116,59 @@ public static Task InvokeAsync(GetAwsU /// public static Output Invoke(GetAwsUnityCatalogAssumeRolePolicyInvokeArgs args, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getAwsUnityCatalogAssumeRolePolicy:getAwsUnityCatalogAssumeRolePolicy", args ?? new GetAwsUnityCatalogAssumeRolePolicyInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. + /// + /// This data source constructs the necessary AWS Unity Catalog assume role policy for you. 
+ /// + /// ## Example Usage + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Aws = Pulumi.Aws; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @this = Databricks.GetAwsUnityCatalogPolicy.Invoke(new() + /// { + /// AwsAccountId = awsAccountId, + /// BucketName = "databricks-bucket", + /// RoleName = $"{prefix}-uc-access", + /// KmsName = "arn:aws:kms:us-west-2:111122223333:key/databricks-kms", + /// }); + /// + /// var thisGetAwsUnityCatalogAssumeRolePolicy = Databricks.GetAwsUnityCatalogAssumeRolePolicy.Invoke(new() + /// { + /// AwsAccountId = awsAccountId, + /// RoleName = $"{prefix}-uc-access", + /// ExternalId = "12345", + /// }); + /// + /// var unityMetastore = new Aws.Iam.Policy("unity_metastore", new() + /// { + /// Name = $"{prefix}-unity-catalog-metastore-access-iam-policy", + /// PolicyDocument = @this.Apply(@this => @this.Apply(getAwsUnityCatalogPolicyResult => getAwsUnityCatalogPolicyResult.Json)), + /// }); + /// + /// var metastoreDataAccess = new Aws.Iam.Role("metastore_data_access", new() + /// { + /// Name = $"{prefix}-uc-access", + /// AssumeRolePolicy = thisGetAwsUnityCatalogAssumeRolePolicy.Apply(getAwsUnityCatalogAssumeRolePolicyResult => getAwsUnityCatalogAssumeRolePolicyResult.Json), + /// ManagedPolicyArns = new[] + /// { + /// unityMetastore.Arn, + /// }, + /// }); + /// + /// }); + /// ``` + /// + public static Output Invoke(GetAwsUnityCatalogAssumeRolePolicyInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getAwsUnityCatalogAssumeRolePolicy:getAwsUnityCatalogAssumeRolePolicy", args ?? 
new GetAwsUnityCatalogAssumeRolePolicyInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetAwsUnityCatalogPolicy.cs b/sdk/dotnet/GetAwsUnityCatalogPolicy.cs index 184294b24..dc7ad94e8 100644 --- a/sdk/dotnet/GetAwsUnityCatalogPolicy.cs +++ b/sdk/dotnet/GetAwsUnityCatalogPolicy.cs @@ -116,6 +116,59 @@ public static Task InvokeAsync(GetAwsUnityCatalo /// public static Output Invoke(GetAwsUnityCatalogPolicyInvokeArgs args, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getAwsUnityCatalogPolicy:getAwsUnityCatalogPolicy", args ?? new GetAwsUnityCatalogPolicyInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. + /// + /// This data source constructs the necessary AWS Unity Catalog policy for you. 
+ /// + /// ## Example Usage + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Aws = Pulumi.Aws; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @this = Databricks.GetAwsUnityCatalogPolicy.Invoke(new() + /// { + /// AwsAccountId = awsAccountId, + /// BucketName = "databricks-bucket", + /// RoleName = $"{prefix}-uc-access", + /// KmsName = "arn:aws:kms:us-west-2:111122223333:key/databricks-kms", + /// }); + /// + /// var thisGetAwsUnityCatalogAssumeRolePolicy = Databricks.GetAwsUnityCatalogAssumeRolePolicy.Invoke(new() + /// { + /// AwsAccountId = awsAccountId, + /// RoleName = $"{prefix}-uc-access", + /// ExternalId = "12345", + /// }); + /// + /// var unityMetastore = new Aws.Iam.Policy("unity_metastore", new() + /// { + /// Name = $"{prefix}-unity-catalog-metastore-access-iam-policy", + /// PolicyDocument = @this.Apply(@this => @this.Apply(getAwsUnityCatalogPolicyResult => getAwsUnityCatalogPolicyResult.Json)), + /// }); + /// + /// var metastoreDataAccess = new Aws.Iam.Role("metastore_data_access", new() + /// { + /// Name = $"{prefix}-uc-access", + /// AssumeRolePolicy = thisGetAwsUnityCatalogAssumeRolePolicy.Apply(getAwsUnityCatalogAssumeRolePolicyResult => getAwsUnityCatalogAssumeRolePolicyResult.Json), + /// ManagedPolicyArns = new[] + /// { + /// unityMetastore.Arn, + /// }, + /// }); + /// + /// }); + /// ``` + /// + public static Output Invoke(GetAwsUnityCatalogPolicyInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getAwsUnityCatalogPolicy:getAwsUnityCatalogPolicy", args ?? 
new GetAwsUnityCatalogPolicyInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetCatalog.cs b/sdk/dotnet/GetCatalog.cs index fc7fc083d..e4b9eaf88 100644 --- a/sdk/dotnet/GetCatalog.cs +++ b/sdk/dotnet/GetCatalog.cs @@ -116,6 +116,59 @@ public static Task InvokeAsync(GetCatalogArgs args, InvokeOpti /// public static Output Invoke(GetCatalogInvokeArgs args, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getCatalog:getCatalog", args ?? new GetCatalogInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** This data source can only be used with a workspace-level provider! + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// Retrieves details of a specific catalog in Unity Catalog, that were created by Pulumi or manually. 
Use databricks.getCatalogs to retrieve IDs of multiple catalogs from Unity Catalog + /// + /// ## Example Usage + /// + /// Read on a specific catalog `test`: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var test = Databricks.GetCatalog.Invoke(new() + /// { + /// Name = "test", + /// }); + /// + /// var things = new Databricks.Grants("things", new() + /// { + /// Catalog = test.Apply(getCatalogResult => getCatalogResult.Name), + /// GrantDetails = new[] + /// { + /// new Databricks.Inputs.GrantsGrantArgs + /// { + /// Principal = "sensitive", + /// Privileges = new[] + /// { + /// "USE_CATALOG", + /// }, + /// }, + /// }, + /// }); + /// + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * databricks.Grant to manage grants within Unity Catalog. + /// * databricks.getCatalogs to list all catalogs within Unity Catalog metastore. + /// + public static Output Invoke(GetCatalogInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getCatalog:getCatalog", args ?? new GetCatalogInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetCatalogs.cs b/sdk/dotnet/GetCatalogs.cs index 7f8620507..5abfd0840 100644 --- a/sdk/dotnet/GetCatalogs.cs +++ b/sdk/dotnet/GetCatalogs.cs @@ -86,6 +86,44 @@ public static Task InvokeAsync(GetCatalogsArgs? args = null, /// public static Output Invoke(GetCatalogsInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getCatalogs:getCatalogs", args ?? new GetCatalogsInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** This data source can only be used with a workspace-level provider! 
+ /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// Retrieves a list of databricks.Catalog ids, that were created by Pulumi or manually, so that special handling could be applied. + /// + /// ## Example Usage + /// + /// Listing all catalogs: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var all = Databricks.GetCatalogs.Invoke(); + /// + /// return new Dictionary<string, object?> + /// { + /// ["allCatalogs"] = all, + /// }; + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * databricks.Schema to manage schemas within Unity Catalog. + /// * databricks.Catalog to manage catalogs within Unity Catalog. + /// + public static Output Invoke(GetCatalogsInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getCatalogs:getCatalogs", args ?? new GetCatalogsInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetCluster.cs b/sdk/dotnet/GetCluster.cs index 67b9cd0f1..b98f37d73 100644 --- a/sdk/dotnet/GetCluster.cs +++ b/sdk/dotnet/GetCluster.cs @@ -88,6 +88,45 @@ public static Task InvokeAsync(GetClusterArgs? args = null, In /// public static Output Invoke(GetClusterInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getCluster:getCluster", args ?? 
new GetClusterInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// Retrieves information about a databricks.Cluster using its id. This could be retrieved programmatically using databricks.getClusters data source. + /// + /// ## Example Usage + /// + /// Retrieve attributes of each SQL warehouses in a workspace + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var all = Databricks.GetClusters.Invoke(); + /// + /// var allGetCluster = ; + /// + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are often used in the same context: + /// + /// * End to end workspace management guide. + /// * databricks.Cluster to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html). + /// * databricks.ClusterPolicy to create a databricks.Cluster policy, which limits the ability to create clusters based on a set of rules. + /// * databricks.InstancePool to manage [instance pools](https://docs.databricks.com/clusters/instance-pools/index.html) to reduce cluster start and auto-scaling times by maintaining a set of idle, ready-to-use instances. + /// * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster. + /// * databricks.Library to install a [library](https://docs.databricks.com/libraries/index.html) on databricks_cluster. + /// * databricks.Pipeline to deploy [Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html). 
+ /// + public static Output Invoke(GetClusterInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getCluster:getCluster", args ?? new GetClusterInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetClusterPolicy.cs b/sdk/dotnet/GetClusterPolicy.cs index bea8cbb89..962209a9b 100644 --- a/sdk/dotnet/GetClusterPolicy.cs +++ b/sdk/dotnet/GetClusterPolicy.cs @@ -76,6 +76,39 @@ public static Task InvokeAsync(GetClusterPolicyArgs? arg /// public static Output Invoke(GetClusterPolicyInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getClusterPolicy:getClusterPolicy", args ?? new GetClusterPolicyInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// Retrieves information about databricks_cluster_policy. + /// + /// ## Example Usage + /// + /// Referring to a cluster policy by name: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var personal = Databricks.GetClusterPolicy.Invoke(new() + /// { + /// Name = "Personal Compute", + /// }); + /// + /// var myCluster = new Databricks.Cluster("my_cluster", new() + /// { + /// PolicyId = personal.Apply(getClusterPolicyResult => getClusterPolicyResult.Id), + /// }); + /// + /// }); + /// ``` + /// + public static Output Invoke(GetClusterPolicyInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getClusterPolicy:getClusterPolicy", args ?? 
new GetClusterPolicyInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetClusters.cs b/sdk/dotnet/GetClusters.cs index 1fa63e724..6f05c0862 100644 --- a/sdk/dotnet/GetClusters.cs +++ b/sdk/dotnet/GetClusters.cs @@ -120,6 +120,61 @@ public static Task InvokeAsync(GetClustersArgs? args = null, /// public static Output Invoke(GetClustersInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getClusters:getClusters", args ?? new GetClustersInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// Retrieves a list of databricks.Cluster ids, that were created by Pulumi or manually, with or without databricks_cluster_policy. + /// + /// ## Example Usage + /// + /// Retrieve cluster IDs for all clusters: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var all = Databricks.GetClusters.Invoke(); + /// + /// }); + /// ``` + /// + /// Retrieve cluster IDs for all clusters having "Shared" in the cluster name: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var allShared = Databricks.GetClusters.Invoke(new() + /// { + /// ClusterNameContains = "shared", + /// }); + /// + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * End to end workspace management guide. 
+ /// * databricks.Cluster to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html). + /// * databricks.ClusterPolicy to create a databricks.Cluster policy, which limits the ability to create clusters based on a set of rules. + /// * databricks.InstancePool to manage [instance pools](https://docs.databricks.com/clusters/instance-pools/index.html) to reduce cluster start and auto-scaling times by maintaining a set of idle, ready-to-use instances. + /// * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster. + /// * databricks.Library to install a [library](https://docs.databricks.com/libraries/index.html) on databricks_cluster. + /// * databricks.Pipeline to deploy [Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html). + /// + public static Output Invoke(GetClustersInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getClusters:getClusters", args ?? new GetClustersInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetCurrentConfig.cs b/sdk/dotnet/GetCurrentConfig.cs index c04857ca8..cdccd43e0 100644 --- a/sdk/dotnet/GetCurrentConfig.cs +++ b/sdk/dotnet/GetCurrentConfig.cs @@ -112,6 +112,57 @@ public static Task InvokeAsync(GetCurrentConfigArgs? arg /// public static Output Invoke(GetCurrentConfigInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getCurrentConfig:getCurrentConfig", args ?? new GetCurrentConfigInvokeArgs(), options.WithDefaults()); + + /// + /// Retrieves information about the currently configured provider to make a decision, for example, add a dynamic block based on the specific cloud. 
+ /// + /// ## Example Usage + /// + /// Create cloud-specific databricks_storage_credential: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @this = Databricks.GetCurrentConfig.Invoke(); + /// + /// var external = new Databricks.StorageCredential("external", new() + /// { + /// AwsIamRole = Enumerable.Single(), + /// AzureManagedIdentity = Enumerable.Single(), + /// DatabricksGcpServiceAccount = Enumerable.Single(), + /// Name = "storage_cred", + /// Comment = "Managed by TF", + /// }); + /// + /// }); + /// ``` + /// + /// ## Exported attributes + /// + /// Data source exposes the following attributes: + /// + /// * `is_account` - Whether the provider is configured at account-level + /// * `account_id` - Account Id if provider is configured at account-level + /// * `host` - Host of the Databricks workspace or account console + /// * `cloud_type` - Cloud type specified in the provider + /// * `auth_type` - Auth type used by the provider + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * End to end workspace management guide + /// * databricks.Directory to manage directories in [Databricks Workpace](https://docs.databricks.com/workspace/workspace-objects.html). + /// * databricks.Notebook to manage [Databricks Notebooks](https://docs.databricks.com/notebooks/index.html). + /// * databricks.Repo to manage [Databricks Repos](https://docs.databricks.com/repos.html). + /// + public static Output Invoke(GetCurrentConfigInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getCurrentConfig:getCurrentConfig", args ?? 
new GetCurrentConfigInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetCurrentMetastore.cs b/sdk/dotnet/GetCurrentMetastore.cs index ef0644884..a4c1c7e6e 100644 --- a/sdk/dotnet/GetCurrentMetastore.cs +++ b/sdk/dotnet/GetCurrentMetastore.cs @@ -90,6 +90,46 @@ public static Task InvokeAsync(GetCurrentMetastoreArg /// public static Output Invoke(GetCurrentMetastoreInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getCurrentMetastore:getCurrentMetastore", args ?? new GetCurrentMetastoreInvokeArgs(), options.WithDefaults()); + + /// + /// Retrieves information about metastore attached to a given workspace. + /// + /// > **Note** This is the workspace-level data source. + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute to prevent _authentication is not configured for provider_ errors. + /// + /// ## Example Usage + /// + /// MetastoreSummary response for a metastore attached to the current workspace. + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @this = Databricks.GetCurrentMetastore.Invoke(); + /// + /// return new Dictionary<string, object?> + /// { + /// ["someMetastore"] = @this.Apply(@this => @this.Apply(getCurrentMetastoreResult => getCurrentMetastoreResult.MetastoreInfo)), + /// }; + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * databricks.Metastore to get information for a metastore with a given ID. + /// * databricks.getMetastores to get a mapping of name to id of all metastores. + /// * databricks.Metastore to manage Metastores within Unity Catalog. 
+ /// * databricks.Catalog to manage catalogs within Unity Catalog. + /// + public static Output Invoke(GetCurrentMetastoreInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getCurrentMetastore:getCurrentMetastore", args ?? new GetCurrentMetastoreInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetCurrentUser.cs b/sdk/dotnet/GetCurrentUser.cs index 7472b3d61..7781e8662 100644 --- a/sdk/dotnet/GetCurrentUser.cs +++ b/sdk/dotnet/GetCurrentUser.cs @@ -26,6 +26,14 @@ public static Task InvokeAsync(InvokeOptions? options = nu /// public static Output Invoke(InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getCurrentUser:getCurrentUser", InvokeArgs.Empty, options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// Retrieves information about databricks.User or databricks_service_principal, that is calling Databricks REST API. Might be useful in applying the same Pulumi by different users in the shared workspace for testing purposes. + /// + public static Output Invoke(InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getCurrentUser:getCurrentUser", InvokeArgs.Empty, options.WithDefaults()); } diff --git a/sdk/dotnet/GetDbfsFile.cs b/sdk/dotnet/GetDbfsFile.cs index 1c44306d4..a00f3e085 100644 --- a/sdk/dotnet/GetDbfsFile.cs +++ b/sdk/dotnet/GetDbfsFile.cs @@ -82,6 +82,42 @@ public static Task InvokeAsync(GetDbfsFileArgs args, InvokeOp /// public static Output Invoke(GetDbfsFileInvokeArgs args, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getDbfsFile:getDbfsFile", args ?? 
new GetDbfsFileInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// This data source allows to get file content from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). + /// + /// ## Example Usage + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var report = Databricks.GetDbfsFile.Invoke(new() + /// { + /// Path = "dbfs:/reports/some.csv", + /// LimitFileSize = true, + /// }); + /// + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * End to end workspace management guide. + /// * databricks.getDbfsFilePaths data to get list of file names from get file content from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). + /// * databricks.DbfsFile to manage relatively small files on [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). + /// * databricks.Mount to [mount your cloud storage](https://docs.databricks.com/data/databricks-file-system.html#mount-object-storage-to-dbfs) on `dbfs:/mnt/name`. + /// + public static Output Invoke(GetDbfsFileInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getDbfsFile:getDbfsFile", args ?? 
new GetDbfsFileInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetDbfsFilePaths.cs b/sdk/dotnet/GetDbfsFilePaths.cs index e99f27b6c..363580ffe 100644 --- a/sdk/dotnet/GetDbfsFilePaths.cs +++ b/sdk/dotnet/GetDbfsFilePaths.cs @@ -86,6 +86,44 @@ public static Task InvokeAsync(GetDbfsFilePathsArgs args /// public static Output Invoke(GetDbfsFilePathsInvokeArgs args, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getDbfsFilePaths:getDbfsFilePaths", args ?? new GetDbfsFilePathsInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// This data source allows to get list of file names from get file content from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). + /// + /// ## Example Usage + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var partitions = Databricks.GetDbfsFilePaths.Invoke(new() + /// { + /// Path = "dbfs:/user/hive/default.db/table", + /// Recursive = false, + /// }); + /// + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * End to end workspace management guide. + /// * databricks.DbfsFile data to get file content from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). + /// * databricks.getDbfsFilePaths data to get list of file names from get file content from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). 
+ /// * databricks.DbfsFile to manage relatively small files on [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). + /// * databricks.Library to install a [library](https://docs.databricks.com/libraries/index.html) on databricks_cluster. + /// * databricks.Mount to [mount your cloud storage](https://docs.databricks.com/data/databricks-file-system.html#mount-object-storage-to-dbfs) on `dbfs:/mnt/name`. + /// + public static Output Invoke(GetDbfsFilePathsInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getDbfsFilePaths:getDbfsFilePaths", args ?? new GetDbfsFilePathsInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetDirectory.cs b/sdk/dotnet/GetDirectory.cs index 7c8f9b008..1f5ecb64c 100644 --- a/sdk/dotnet/GetDirectory.cs +++ b/sdk/dotnet/GetDirectory.cs @@ -62,6 +62,32 @@ public static Task InvokeAsync(GetDirectoryArgs args, Invoke /// public static Output Invoke(GetDirectoryInvokeArgs args, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getDirectory:getDirectory", args ?? new GetDirectoryInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// This data source allows to get information about a directory in a Databricks Workspace. 
+ /// + /// ## Example Usage + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var prod = Databricks.GetDirectory.Invoke(new() + /// { + /// Path = "/Production", + /// }); + /// + /// }); + /// ``` + /// + public static Output Invoke(GetDirectoryInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getDirectory:getDirectory", args ?? new GetDirectoryInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetExternalLocation.cs b/sdk/dotnet/GetExternalLocation.cs index 3d3aa11a5..a7d86a8aa 100644 --- a/sdk/dotnet/GetExternalLocation.cs +++ b/sdk/dotnet/GetExternalLocation.cs @@ -88,6 +88,45 @@ public static Task InvokeAsync(GetExternalLocationArg /// public static Output Invoke(GetExternalLocationInvokeArgs args, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getExternalLocation:getExternalLocation", args ?? new GetExternalLocationInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** This data source can only be used with a workspace-level provider! + /// + /// Retrieves details about a databricks.ExternalLocation that were created by Pulumi or manually. 
+ /// + /// ## Example Usage + /// + /// Getting details of an existing external location in the metastore + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @this = Databricks.GetExternalLocation.Invoke(new() + /// { + /// Name = "this", + /// }); + /// + /// return new Dictionary<string, object?> + /// { + /// ["createdBy"] = @this.Apply(@this => @this.Apply(getExternalLocationResult => getExternalLocationResult.ExternalLocationInfo?.CreatedBy)), + /// }; + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * databricks.getExternalLocations to get names of all external locations + /// * databricks.ExternalLocation to manage external locations within Unity Catalog. + /// + public static Output Invoke(GetExternalLocationInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getExternalLocation:getExternalLocation", args ?? new GetExternalLocationInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetExternalLocations.cs b/sdk/dotnet/GetExternalLocations.cs index e9e28de95..a80d2533e 100644 --- a/sdk/dotnet/GetExternalLocations.cs +++ b/sdk/dotnet/GetExternalLocations.cs @@ -82,6 +82,42 @@ public static Task InvokeAsync(GetExternalLocationsA /// public static Output Invoke(GetExternalLocationsInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getExternalLocations:getExternalLocations", args ?? new GetExternalLocationsInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** This data source can only be used with a workspace-level provider! 
+ /// + /// Retrieves a list of databricks.ExternalLocation objects, that were created by Pulumi or manually, so that special handling could be applied. + /// + /// ## Example Usage + /// + /// List all external locations in the metastore + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var all = Databricks.GetExternalLocations.Invoke(); + /// + /// return new Dictionary<string, object?> + /// { + /// ["allExternalLocations"] = all.Apply(getExternalLocationsResult => getExternalLocationsResult.Names), + /// }; + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * databricks.ExternalLocation to get information about a single external location + /// * databricks.ExternalLocation to manage external locations within Unity Catalog. + /// + public static Output Invoke(GetExternalLocationsInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getExternalLocations:getExternalLocations", args ?? new GetExternalLocationsInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetFunctions.cs b/sdk/dotnet/GetFunctions.cs index cf5923881..6bb214158 100644 --- a/sdk/dotnet/GetFunctions.cs +++ b/sdk/dotnet/GetFunctions.cs @@ -88,6 +88,45 @@ public static Task InvokeAsync(GetFunctionsArgs args, Invoke /// public static Output Invoke(GetFunctionsInvokeArgs args, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getFunctions:getFunctions", args ?? new GetFunctionsInvokeArgs(), options.WithDefaults()); + + /// + /// > This data source can only be used with a workspace-level provider! + /// + /// Retrieves a list of [User-Defined Functions (UDFs) registered in the Unity Catalog](https://docs.databricks.com/en/udf/unity-catalog.html). 
+ /// + /// ## Example Usage + /// + /// List all functions defined in a specific schema (`main.default` in this example): + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var all = Databricks.GetFunctions.Invoke(new() + /// { + /// CatalogName = "main", + /// SchemaName = "default", + /// }); + /// + /// return new Dictionary<string, object?> + /// { + /// ["allExternalLocations"] = all.Apply(getFunctionsResult => getFunctionsResult.Functions), + /// }; + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * databricks.Schema to get information about a single schema + /// + public static Output Invoke(GetFunctionsInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getFunctions:getFunctions", args ?? new GetFunctionsInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetGroup.cs b/sdk/dotnet/GetGroup.cs index 1580c369c..8a9d37ed4 100644 --- a/sdk/dotnet/GetGroup.cs +++ b/sdk/dotnet/GetGroup.cs @@ -110,6 +110,56 @@ public static Task InvokeAsync(GetGroupArgs args, InvokeOptions? /// public static Output Invoke(GetGroupInvokeArgs args, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getGroup:getGroup", args ?? new GetGroupInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// Retrieves information about databricks.Group members, entitlements and instance profiles. 
+ /// + /// ## Example Usage + /// + /// Adding user to administrative group + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var admins = Databricks.GetGroup.Invoke(new() + /// { + /// DisplayName = "admins", + /// }); + /// + /// var me = new Databricks.User("me", new() + /// { + /// UserName = "me@example.com", + /// }); + /// + /// var myMemberA = new Databricks.GroupMember("my_member_a", new() + /// { + /// GroupId = admins.Apply(getGroupResult => getGroupResult.Id), + /// MemberId = me.Id, + /// }); + /// + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * End to end workspace management guide + /// * databricks.Cluster to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html). + /// * databricks.Directory to manage directories in [Databricks Workpace](https://docs.databricks.com/workspace/workspace-objects.html). + /// * databricks.GroupMember to attach users and groups as group members. + /// * databricks.Permissions to manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspace. + /// * databricks.User to [manage users](https://docs.databricks.com/administration-guide/users-groups/users.html), that could be added to databricks.Group within the workspace. + /// + public static Output Invoke(GetGroupInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getGroup:getGroup", args ?? 
new GetGroupInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetInstancePool.cs b/sdk/dotnet/GetInstancePool.cs index a2675bb2e..a7d58537d 100644 --- a/sdk/dotnet/GetInstancePool.cs +++ b/sdk/dotnet/GetInstancePool.cs @@ -76,6 +76,39 @@ public static Task InvokeAsync(GetInstancePoolArgs args, /// public static Output Invoke(GetInstancePoolInvokeArgs args, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getInstancePool:getInstancePool", args ?? new GetInstancePoolInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// Retrieves information about databricks_instance_pool. + /// + /// ## Example Usage + /// + /// Referring to an instance pool by name: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var pool = Databricks.GetInstancePool.Invoke(new() + /// { + /// Name = "All spot", + /// }); + /// + /// var myCluster = new Databricks.Cluster("my_cluster", new() + /// { + /// InstancePoolId = pool.Apply(getInstancePoolResult => getInstancePoolResult.Id), + /// }); + /// + /// }); + /// ``` + /// + public static Output Invoke(GetInstancePoolInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getInstancePool:getInstancePool", args ?? 
new GetInstancePoolInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetInstanceProfiles.cs b/sdk/dotnet/GetInstanceProfiles.cs index 45bf3d1f8..d338b6b44 100644 --- a/sdk/dotnet/GetInstanceProfiles.cs +++ b/sdk/dotnet/GetInstanceProfiles.cs @@ -64,6 +64,33 @@ public static Task InvokeAsync(GetInstanceProfilesArg /// public static Output Invoke(GetInstanceProfilesInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getInstanceProfiles:getInstanceProfiles", args ?? new GetInstanceProfilesInvokeArgs(), options.WithDefaults()); + + /// + /// Lists all available databricks_instance_profiles. + /// + /// ## Example Usage + /// + /// Get all instance profiles: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var all = Databricks.GetInstanceProfiles.Invoke(); + /// + /// return new Dictionary<string, object?> + /// { + /// ["allInstanceProfiles"] = all.Apply(getInstanceProfilesResult => getInstanceProfilesResult.InstanceProfiles), + /// }; + /// }); + /// ``` + /// + public static Output Invoke(GetInstanceProfilesInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getInstanceProfiles:getInstanceProfiles", args ?? new GetInstanceProfilesInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetJob.cs b/sdk/dotnet/GetJob.cs index 48ea800c2..c0422a9b5 100644 --- a/sdk/dotnet/GetJob.cs +++ b/sdk/dotnet/GetJob.cs @@ -88,6 +88,45 @@ public static Task InvokeAsync(GetJobArgs? args = null, InvokeOpti /// public static Output Invoke(GetJobInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getJob:getJob", args ?? 
new GetJobInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// Retrieves the settings of databricks.Job by name or by id. Complements the feature of the databricks.getJobs data source. + /// + /// ## Example Usage + /// + /// Getting the existing cluster id of specific databricks.Job by name or by id: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @this = Databricks.GetJob.Invoke(new() + /// { + /// JobName = "My job", + /// }); + /// + /// return new Dictionary<string, object?> + /// { + /// ["jobNumWorkers"] = @this.Apply(@this => @this.Apply(getJobResult => getJobResult.JobSettings?.Settings?.NewCluster?.NumWorkers)), + /// }; + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * databricks.getJobs data to get all jobs and their names from a workspace. + /// * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster. + /// + public static Output Invoke(GetJobInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getJob:getJob", args ?? new GetJobInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetJobs.cs b/sdk/dotnet/GetJobs.cs index 0f6087f1e..0b3fa8d6a 100644 --- a/sdk/dotnet/GetJobs.cs +++ b/sdk/dotnet/GetJobs.cs @@ -148,6 +148,75 @@ public static Task InvokeAsync(GetJobsArgs? args = null, InvokeOp /// public static Output Invoke(GetJobsInvokeArgs? args = null, InvokeOptions? 
options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getJobs:getJobs", args ?? new GetJobsInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// Retrieves a list of databricks.Job ids, that were created by Pulumi or manually, so that special handling could be applied. + /// + /// > **Note** Data resource will error in case of jobs with duplicate names. + /// + /// ## Example Usage + /// + /// Granting view databricks.Permissions to all databricks.Job within the workspace: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using System.Threading.Tasks; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(async() => + /// { + /// var @this = await Databricks.GetJobs.InvokeAsync(); + /// + /// var everyoneCanViewAllJobs = new List<Databricks.Permissions>(); + /// foreach (var range in ) + /// { + /// everyoneCanViewAllJobs.Add(new Databricks.Permissions($"everyone_can_view_all_jobs-{range.Key}", new() + /// { + /// JobId = range.Value, + /// AccessControls = new[] + /// { + /// new Databricks.Inputs.PermissionsAccessControlArgs + /// { + /// GroupName = "users", + /// PermissionLevel = "CAN_VIEW", + /// }, + /// }, + /// })); + /// } + /// }); + /// ``` + /// + /// Getting ID of specific databricks.Job by name: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @this = Databricks.GetJobs.Invoke(); + /// + /// return new Dictionary<string, object?> + /// { + /// ["x"] = @this.Apply(@this => $"ID of `x` job is 
{@this.Apply(getJobsResult => getJobsResult.Ids?.X)}"), + /// }; + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster. + /// + public static Output Invoke(GetJobsInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getJobs:getJobs", args ?? new GetJobsInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetMetastore.cs b/sdk/dotnet/GetMetastore.cs index 8ef2838f0..f39e38f78 100644 --- a/sdk/dotnet/GetMetastore.cs +++ b/sdk/dotnet/GetMetastore.cs @@ -124,6 +124,63 @@ public static Task InvokeAsync(GetMetastoreArgs? args = null /// public static Output Invoke(GetMetastoreInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getMetastore:getMetastore", args ?? new GetMetastoreInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** This data source can only be used with an account-level provider! + /// + /// Retrieves information about metastore for a given id of databricks.Metastore object, that was created by Pulumi or manually, so that special handling could be applied. + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _authentication is not configured for provider_ errors. 
+ /// + /// ## Example Usage + /// + /// MetastoreInfo response for a given metastore id + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Aws = Pulumi.Aws; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var metastore = new Aws.S3.BucketV2("metastore", new() + /// { + /// Bucket = $"{prefix}-metastore", + /// ForceDestroy = true, + /// }); + /// + /// var thisMetastore = new Databricks.Metastore("this", new() + /// { + /// Name = "primary", + /// StorageRoot = metastore.Id.Apply(id => $"s3://{id}/metastore"), + /// Owner = unityAdminGroup, + /// ForceDestroy = true, + /// }); + /// + /// var @this = Databricks.GetMetastore.Invoke(new() + /// { + /// MetastoreId = thisMetastore.Id, + /// }); + /// + /// return new Dictionary<string, object?> + /// { + /// ["someMetastore"] = @this.Apply(@this => @this.Apply(getMetastoreResult => getMetastoreResult.MetastoreInfo)), + /// }; + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * databricks.getMetastores to get mapping of name to id of all metastores. + /// * databricks.Metastore to manage Metastores within Unity Catalog. + /// * databricks.Catalog to manage catalogs within Unity Catalog. + /// + public static Output Invoke(GetMetastoreInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getMetastore:getMetastore", args ?? new GetMetastoreInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetMetastores.cs b/sdk/dotnet/GetMetastores.cs index 1dcfbf122..19f371859 100644 --- a/sdk/dotnet/GetMetastores.cs +++ b/sdk/dotnet/GetMetastores.cs @@ -88,6 +88,45 @@ public static Task InvokeAsync(GetMetastoresArgs? args = nu /// public static Output Invoke(GetMetastoresInvokeArgs? args = null, InvokeOptions? 
options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getMetastores:getMetastores", args ?? new GetMetastoresInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** This data source can only be used with an account-level provider! + /// + /// Retrieves a mapping of name to id of databricks.Metastore objects, that were created by Pulumi or manually, so that special handling could be applied. + /// + /// > **Note** `account_id` provider configuration property is required for this resource to work. Data resource will error in case of metastores with duplicate names. This data source is only available for users & service principals with account admin status + /// + /// ## Example Usage + /// + /// Mapping of name to id of all metastores: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var all = Databricks.GetMetastores.Invoke(); + /// + /// return new Dictionary<string, object?> + /// { + /// ["allMetastores"] = all.Apply(getMetastoresResult => getMetastoresResult.Ids), + /// }; + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * databricks.Metastore to get information about a single metastore. + /// * databricks.Metastore to manage Metastores within Unity Catalog. + /// * databricks.Catalog to manage catalogs within Unity Catalog. + /// + public static Output Invoke(GetMetastoresInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getMetastores:getMetastores", args ?? 
new GetMetastoresInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetMlflowExperiment.cs b/sdk/dotnet/GetMlflowExperiment.cs index 2d8825640..4d263c749 100644 --- a/sdk/dotnet/GetMlflowExperiment.cs +++ b/sdk/dotnet/GetMlflowExperiment.cs @@ -26,6 +26,14 @@ public static Task InvokeAsync(GetMlflowExperimentArg /// public static Output Invoke(GetMlflowExperimentInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getMlflowExperiment:getMlflowExperiment", args ?? new GetMlflowExperimentInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// Retrieves the settings of databricks.MlflowExperiment by id or name. + /// + public static Output Invoke(GetMlflowExperimentInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getMlflowExperiment:getMlflowExperiment", args ?? new GetMlflowExperimentInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetMlflowModel.cs b/sdk/dotnet/GetMlflowModel.cs index d2841ae0a..c7dad4de8 100644 --- a/sdk/dotnet/GetMlflowModel.cs +++ b/sdk/dotnet/GetMlflowModel.cs @@ -178,6 +178,90 @@ public static Task InvokeAsync(GetMlflowModelArgs args, In /// public static Output Invoke(GetMlflowModelInvokeArgs args, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getMlflowModel:getMlflowModel", args ?? 
new GetMlflowModelInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// Retrieves the settings of databricks.MlflowModel by name. + /// + /// ## Example Usage + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var thisMlflowModel = new Databricks.MlflowModel("this", new() + /// { + /// Name = "My MLflow Model", + /// Description = "My MLflow model description", + /// Tags = new[] + /// { + /// new Databricks.Inputs.MlflowModelTagArgs + /// { + /// Key = "key1", + /// Value = "value1", + /// }, + /// new Databricks.Inputs.MlflowModelTagArgs + /// { + /// Key = "key2", + /// Value = "value2", + /// }, + /// }, + /// }); + /// + /// var @this = Databricks.GetMlflowModel.Invoke(new() + /// { + /// Name = "My MLflow Model", + /// }); + /// + /// return new Dictionary<string, object?> + /// { + /// ["model"] = @this, + /// }; + /// }); + /// ``` + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @this = Databricks.GetMlflowModel.Invoke(new() + /// { + /// Name = "My MLflow Model with multiple versions", + /// }); + /// + /// var thisModelServing = new Databricks.ModelServing("this", new() + /// { + /// Name = "model-serving-endpoint", + /// Config = new Databricks.Inputs.ModelServingConfigArgs + /// { + /// ServedModels = new[] + /// { + /// new Databricks.Inputs.ModelServingConfigServedModelArgs + /// { + /// Name = "model_serving_prod", + /// ModelName = @this.Apply(@this => 
@this.Apply(getMlflowModelResult => getMlflowModelResult.Name)), + /// ModelVersion = @this.Apply(@this => @this.Apply(getMlflowModelResult => getMlflowModelResult.LatestVersions[0]?.Version)), + /// WorkloadSize = "Small", + /// ScaleToZeroEnabled = true, + /// }, + /// }, + /// }, + /// }); + /// + /// }); + /// ``` + /// + public static Output Invoke(GetMlflowModelInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getMlflowModel:getMlflowModel", args ?? new GetMlflowModelInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetMlflowModels.cs b/sdk/dotnet/GetMlflowModels.cs index 6ed37d346..c5be24942 100644 --- a/sdk/dotnet/GetMlflowModels.cs +++ b/sdk/dotnet/GetMlflowModels.cs @@ -64,6 +64,33 @@ public static Task InvokeAsync(GetMlflowModelsArgs? args /// public static Output Invoke(GetMlflowModelsInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getMlflowModels:getMlflowModels", args ?? new GetMlflowModelsInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** This data source could be only used with workspace-level provider! + /// + /// Retrieves a list of databricks.MlflowModel objects, that were created by Pulumi or manually, so that special handling could be applied. + /// + /// ## Example Usage + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @this = Databricks.GetMlflowModels.Invoke(); + /// + /// return new Dictionary<string, object?> + /// { + /// ["model"] = @this, + /// }; + /// }); + /// ``` + /// + public static Output Invoke(GetMlflowModelsInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getMlflowModels:getMlflowModels", args ?? 
new GetMlflowModelsInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetMwsCredentials.cs b/sdk/dotnet/GetMwsCredentials.cs index 06205b59b..ba98a4058 100644 --- a/sdk/dotnet/GetMwsCredentials.cs +++ b/sdk/dotnet/GetMwsCredentials.cs @@ -94,6 +94,48 @@ public static Task InvokeAsync(GetMwsCredentialsArgs? a /// public static Output Invoke(GetMwsCredentialsInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getMwsCredentials:getMwsCredentials", args ?? new GetMwsCredentialsInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// Lists all databricks.MwsCredentials in Databricks Account. + /// + /// > **Note** `account_id` provider configuration property is required for this resource to work. + /// + /// ## Example Usage + /// + /// Listing all credentials in Databricks Account + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var all = Databricks.GetMwsCredentials.Invoke(); + /// + /// return new Dictionary<string, object?> + /// { + /// ["allMwsCredentials"] = all.Apply(getMwsCredentialsResult => getMwsCredentialsResult.Ids), + /// }; + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * Provisioning Databricks on AWS guide. + /// * databricks.MwsCustomerManagedKeys to configure KMS keys for new workspaces within AWS. 
+ /// * databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). + /// * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. + /// * databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS. + /// * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + /// + public static Output Invoke(GetMwsCredentialsInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getMwsCredentials:getMwsCredentials", args ?? new GetMwsCredentialsInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetMwsWorkspaces.cs b/sdk/dotnet/GetMwsWorkspaces.cs index 3ecc931a7..af9cd5d9f 100644 --- a/sdk/dotnet/GetMwsWorkspaces.cs +++ b/sdk/dotnet/GetMwsWorkspaces.cs @@ -86,6 +86,44 @@ public static Task InvokeAsync(GetMwsWorkspacesArgs? arg /// public static Output Invoke(GetMwsWorkspacesInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getMwsWorkspaces:getMwsWorkspaces", args ?? new GetMwsWorkspacesInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// Lists all databricks.MwsWorkspaces in Databricks Account. + /// + /// > **Note** `account_id` provider configuration property is required for this resource to work. 
+ /// + /// ## Example Usage + /// + /// Listing all workspaces in + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var all = Databricks.GetMwsWorkspaces.Invoke(); + /// + /// return new Dictionary<string, object?> + /// { + /// ["allMwsWorkspaces"] = all.Apply(getMwsWorkspacesResult => getMwsWorkspacesResult.Ids), + /// }; + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * databricks.MwsWorkspaces to manage Databricks Workspaces on AWS and GCP. + /// * databricks.MetastoreAssignment to assign databricks.Metastore to databricks.MwsWorkspaces or azurerm_databricks_workspace + /// + public static Output Invoke(GetMwsWorkspacesInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getMwsWorkspaces:getMwsWorkspaces", args ?? new GetMwsWorkspacesInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetNodeType.cs b/sdk/dotnet/GetNodeType.cs index cbd50986b..974a6be07 100644 --- a/sdk/dotnet/GetNodeType.cs +++ b/sdk/dotnet/GetNodeType.cs @@ -130,6 +130,66 @@ public static Task InvokeAsync(GetNodeTypeArgs? args = null, /// public static Output Invoke(GetNodeTypeInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getNodeType:getNodeType", args ?? new GetNodeTypeInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// Gets the smallest node type for databricks.Cluster that fits search criteria, like amount of RAM or number of cores. 
[AWS](https://databricks.com/product/aws-pricing/instance-types) or [Azure](https://azure.microsoft.com/en-us/pricing/details/databricks/). Internally data source fetches [node types](https://docs.databricks.com/dev-tools/api/latest/clusters.html#list-node-types) available per cloud, similar to executing `databricks clusters list-node-types`, and filters it to return the smallest possible node with criteria. + /// + /// > **Note** This is experimental functionality, which aims to simplify things. In case of wrong parameters given (e.g. `min_gpus = 876`) or no nodes matching, data source will return cloud-default node type, even though it doesn't match search criteria specified by data source arguments: [i3.xlarge](https://aws.amazon.com/ec2/instance-types/i3/) for AWS or [Standard_D3_v2](https://docs.microsoft.com/en-us/azure/cloud-services/cloud-services-sizes-specs#dv2-series) for Azure. + /// + /// ## Example Usage + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var withGpu = Databricks.GetNodeType.Invoke(new() + /// { + /// LocalDisk = true, + /// MinCores = 16, + /// GbPerCore = 1, + /// MinGpus = 1, + /// }); + /// + /// var gpuMl = Databricks.GetSparkVersion.Invoke(new() + /// { + /// Gpu = true, + /// Ml = true, + /// }); + /// + /// var research = new Databricks.Cluster("research", new() + /// { + /// ClusterName = "Research Cluster", + /// SparkVersion = gpuMl.Apply(getSparkVersionResult => getSparkVersionResult.Id), + /// NodeTypeId = withGpu.Apply(getNodeTypeResult => getNodeTypeResult.Id), + /// AutoterminationMinutes = 20, + /// Autoscale = new Databricks.Inputs.ClusterAutoscaleArgs + /// { + /// MinWorkers = 1, + /// MaxWorkers = 50, + /// }, + /// }); + /// + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + 
/// * End to end workspace management guide. + /// * databricks.Cluster to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html). + /// * databricks.ClusterPolicy to create a databricks.Cluster policy, which limits the ability to create clusters based on a set of rules. + /// * databricks.InstancePool to manage [instance pools](https://docs.databricks.com/clusters/instance-pools/index.html) to reduce cluster start and auto-scaling times by maintaining a set of idle, ready-to-use instances. + /// * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster. + /// + public static Output Invoke(GetNodeTypeInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getNodeType:getNodeType", args ?? new GetNodeTypeInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetNotebook.cs b/sdk/dotnet/GetNotebook.cs index 4cbd385a5..519edaf1d 100644 --- a/sdk/dotnet/GetNotebook.cs +++ b/sdk/dotnet/GetNotebook.cs @@ -64,6 +64,33 @@ public static Task InvokeAsync(GetNotebookArgs args, InvokeOp /// public static Output Invoke(GetNotebookInvokeArgs args, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getNotebook:getNotebook", args ?? new GetNotebookInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// This data source allows to export a notebook from Databricks Workspace. 
+ /// + /// ## Example Usage + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var features = Databricks.GetNotebook.Invoke(new() + /// { + /// Path = "/Production/Features", + /// Format = "SOURCE", + /// }); + /// + /// }); + /// ``` + /// + public static Output Invoke(GetNotebookInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getNotebook:getNotebook", args ?? new GetNotebookInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetNotebookPaths.cs b/sdk/dotnet/GetNotebookPaths.cs index d26392f89..8872fcf0b 100644 --- a/sdk/dotnet/GetNotebookPaths.cs +++ b/sdk/dotnet/GetNotebookPaths.cs @@ -64,6 +64,33 @@ public static Task InvokeAsync(GetNotebookPathsArgs args /// public static Output Invoke(GetNotebookPathsInvokeArgs args, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getNotebookPaths:getNotebookPaths", args ?? new GetNotebookPathsInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// This data source allows to list notebooks in the Databricks Workspace. 
+ /// + /// ## Example Usage + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var prod = Databricks.GetNotebookPaths.Invoke(new() + /// { + /// Path = "/Production", + /// Recursive = true, + /// }); + /// + /// }); + /// ``` + /// + public static Output Invoke(GetNotebookPathsInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getNotebookPaths:getNotebookPaths", args ?? new GetNotebookPathsInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetNotificationDestinations.cs b/sdk/dotnet/GetNotificationDestinations.cs index 1f84c6b0e..90e69a7b5 100644 --- a/sdk/dotnet/GetNotificationDestinations.cs +++ b/sdk/dotnet/GetNotificationDestinations.cs @@ -122,6 +122,62 @@ public static Task InvokeAsync(GetNotificatio /// public static Output Invoke(GetNotificationDestinationsInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getNotificationDestinations:getNotificationDestinations", args ?? new GetNotificationDestinationsInvokeArgs(), options.WithDefaults()); + + /// + /// This data source allows you to retrieve information about [Notification Destinations](https://docs.databricks.com/api/workspace/notificationdestinations). Notification Destinations are used to send notifications for query alerts and jobs to external systems such as email, Slack, Microsoft Teams, PagerDuty, or generic webhooks. 
+ /// + /// ## Example Usage + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var email = new Databricks.NotificationDestination("email", new() + /// { + /// DisplayName = "Email Destination", + /// Config = new Databricks.Inputs.NotificationDestinationConfigArgs + /// { + /// Email = new Databricks.Inputs.NotificationDestinationConfigEmailArgs + /// { + /// Addresses = new[] + /// { + /// "abc@gmail.com", + /// }, + /// }, + /// }, + /// }); + /// + /// var slack = new Databricks.NotificationDestination("slack", new() + /// { + /// DisplayName = "Slack Destination", + /// Config = new Databricks.Inputs.NotificationDestinationConfigArgs + /// { + /// Slack = new Databricks.Inputs.NotificationDestinationConfigSlackArgs + /// { + /// Url = "https://hooks.slack.com/services/...", + /// }, + /// }, + /// }); + /// + /// // Lists all notification desitnations + /// var @this = Databricks.GetNotificationDestinations.Invoke(); + /// + /// // List destinations of specific type and name + /// var filteredNotification = Databricks.GetNotificationDestinations.Invoke(new() + /// { + /// DisplayNameContains = "Destination", + /// Type = "EMAIL", + /// }); + /// + /// }); + /// ``` + /// + public static Output Invoke(GetNotificationDestinationsInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getNotificationDestinations:getNotificationDestinations", args ?? new GetNotificationDestinationsInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetPipelines.cs b/sdk/dotnet/GetPipelines.cs index bb9c1a0fa..e6108587d 100644 --- a/sdk/dotnet/GetPipelines.cs +++ b/sdk/dotnet/GetPipelines.cs @@ -176,6 +176,89 @@ public static Task InvokeAsync(GetPipelinesArgs? args = null /// public static Output Invoke(GetPipelinesInvokeArgs? 
args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getPipelines:getPipelines", args ?? new GetPipelinesInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _authentication is not configured for provider_ errors. + /// + /// Retrieves a list of all databricks.Pipeline ([Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html)) ids deployed in a workspace, or those matching the provided search term. Maximum 100 results. + /// + /// ## Example Usage + /// + /// Get all Delta Live Tables pipelines: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var all = Databricks.GetPipelines.Invoke(); + /// + /// return new Dictionary<string, object?> + /// { + /// ["allPipelines"] = all.Apply(getPipelinesResult => getPipelinesResult.Ids), + /// }; + /// }); + /// ``` + /// + /// Filter Delta Live Tables pipelines by name (exact match): + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @this = Databricks.GetPipelines.Invoke(new() + /// { + /// PipelineName = "my_pipeline", + /// }); + /// + /// return new Dictionary<string, object?> + /// { + /// ["myPipeline"] = @this.Apply(@this => @this.Apply(getPipelinesResult => getPipelinesResult.Ids)), + /// }; + /// }); + /// ``` + /// + /// Filter Delta Live Tables pipelines by name (wildcard search): + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using 
Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @this = Databricks.GetPipelines.Invoke(new() + /// { + /// PipelineName = "%pipeline%", + /// }); + /// + /// return new Dictionary<string, object?> + /// { + /// ["wildcardPipelines"] = @this.Apply(@this => @this.Apply(getPipelinesResult => getPipelinesResult.Ids)), + /// }; + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * End to end workspace management guide. + /// * databricks.Pipeline to deploy [Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html). + /// * databricks.Cluster to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html). + /// * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster. + /// * databricks.Notebook to manage [Databricks Notebooks](https://docs.databricks.com/notebooks/index.html). + /// + public static Output Invoke(GetPipelinesInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getPipelines:getPipelines", args ?? new GetPipelinesInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetRegisteredModel.cs b/sdk/dotnet/GetRegisteredModel.cs index 75fa62295..031feb7c8 100644 --- a/sdk/dotnet/GetRegisteredModel.cs +++ b/sdk/dotnet/GetRegisteredModel.cs @@ -78,6 +78,40 @@ public static Task InvokeAsync(GetRegisteredModelArgs /// public static Output Invoke(GetRegisteredModelInvokeArgs args, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getRegisteredModel:getRegisteredModel", args ?? new GetRegisteredModelInvokeArgs(), options.WithDefaults()); + + /// + /// > This resource can only be used with a workspace-level provider! 
+ /// + /// This resource allows you to get information about [Model in Unity Catalog](https://docs.databricks.com/en/mlflow/models-in-uc.html) in Databricks. + /// + /// ## Example Usage + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @this = Databricks.GetRegisteredModel.Invoke(new() + /// { + /// FullName = "main.default.my_model", + /// }); + /// + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are often used in the same context: + /// + /// * databricks.RegisteredModel resource to manage models within Unity Catalog. + /// * databricks.ModelServing to serve this model on a Databricks serving endpoint. + /// * databricks.MlflowExperiment to manage [MLflow experiments](https://docs.databricks.com/data/data-sources/mlflow-experiment.html) in Databricks. + /// + public static Output Invoke(GetRegisteredModelInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getRegisteredModel:getRegisteredModel", args ?? new GetRegisteredModelInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetSchema.cs b/sdk/dotnet/GetSchema.cs index 9691a6b8f..ef21dc69a 100644 --- a/sdk/dotnet/GetSchema.cs +++ b/sdk/dotnet/GetSchema.cs @@ -118,6 +118,60 @@ public static Task InvokeAsync(GetSchemaArgs args, InvokeOption /// public static Output Invoke(GetSchemaInvokeArgs args, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getSchema:getSchema", args ?? new GetSchemaInvokeArgs(), options.WithDefaults()); + + /// + /// Retrieves details about databricks.Schema that was created by Pulumi or manually. + /// A schema can be identified by its two-level (fully qualified) name (in the form of: `catalog_name`.`schema_name`) as input. 
This can be retrieved programmatically using databricks.getSchemas data source. + /// + /// ## Example Usage + /// + /// * Retrieve details of all schemas in in a _sandbox_ databricks_catalog: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var all = Databricks.GetSchemas.Invoke(new() + /// { + /// CatalogName = "sandbox", + /// }); + /// + /// var @this = ; + /// + /// }); + /// ``` + /// + /// * Search for a specific schema by its fully qualified name: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @this = Databricks.GetSchema.Invoke(new() + /// { + /// Name = "catalog.schema", + /// }); + /// + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * databricks.Schema to manage schemas within Unity Catalog. + /// * databricks.Catalog to manage catalogs within Unity Catalog. + /// + public static Output Invoke(GetSchemaInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getSchema:getSchema", args ?? new GetSchemaInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetSchemas.cs b/sdk/dotnet/GetSchemas.cs index db24f1b90..f46a1658f 100644 --- a/sdk/dotnet/GetSchemas.cs +++ b/sdk/dotnet/GetSchemas.cs @@ -92,6 +92,47 @@ public static Task InvokeAsync(GetSchemasArgs args, InvokeOpti /// public static Output Invoke(GetSchemasInvokeArgs args, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getSchemas:getSchemas", args ?? 
new GetSchemasInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** This data source can only be used with a workspace-level provider! + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// Retrieves a list of databricks.Schema ids, that were created by Pulumi or manually, so that special handling could be applied. + /// + /// ## Example Usage + /// + /// Listing all schemas in a _sandbox_ databricks_catalog: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var sandbox = Databricks.GetSchemas.Invoke(new() + /// { + /// CatalogName = "sandbox", + /// }); + /// + /// return new Dictionary<string, object?> + /// { + /// ["allSandboxSchemas"] = sandbox, + /// }; + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * databricks.Schema to manage schemas within Unity Catalog. + /// * databricks.Catalog to manage catalogs within Unity Catalog. + /// + public static Output Invoke(GetSchemasInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getSchemas:getSchemas", args ?? new GetSchemasInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetServicePrincipal.cs b/sdk/dotnet/GetServicePrincipal.cs index 1f20f7e51..4379878da 100644 --- a/sdk/dotnet/GetServicePrincipal.cs +++ b/sdk/dotnet/GetServicePrincipal.cs @@ -114,6 +114,58 @@ public static Task InvokeAsync(GetServicePrincipalArg /// public static Output Invoke(GetServicePrincipalInvokeArgs? args = null, InvokeOptions? 
options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getServicePrincipal:getServicePrincipal", args ?? new GetServicePrincipalInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// Retrieves information about databricks_service_principal. + /// + /// ## Example Usage + /// + /// Adding service principal `11111111-2222-3333-4444-555666777888` to administrative group + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var admins = Databricks.GetGroup.Invoke(new() + /// { + /// DisplayName = "admins", + /// }); + /// + /// var spn = Databricks.GetServicePrincipal.Invoke(new() + /// { + /// ApplicationId = "11111111-2222-3333-4444-555666777888", + /// }); + /// + /// var myMemberA = new Databricks.GroupMember("my_member_a", new() + /// { + /// GroupId = admins.Apply(getGroupResult => getGroupResult.Id), + /// MemberId = spn.Apply(getServicePrincipalResult => getServicePrincipalResult.Id), + /// }); + /// + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// - End to end workspace management guide. + /// - databricks.getCurrentUser data to retrieve information about databricks.User or databricks_service_principal, that is calling Databricks REST API. + /// - databricks.Group to manage [groups in Databricks Workspace](https://docs.databricks.com/administration-guide/users-groups/groups.html) or [Account Console](https://accounts.cloud.databricks.com/) (for AWS deployments). 
+ /// - databricks.Group data to retrieve information about databricks.Group members, entitlements and instance profiles. + /// - databricks.GroupInstanceProfile to attach databricks.InstanceProfile (AWS) to databricks_group. + /// - databricks.GroupMember to attach users and groups as group members. + /// - databricks.Permissions to manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspace. + /// - databricks_service principal to manage service principals + /// + public static Output Invoke(GetServicePrincipalInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getServicePrincipal:getServicePrincipal", args ?? new GetServicePrincipalInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetServicePrincipals.cs b/sdk/dotnet/GetServicePrincipals.cs index f83260f96..ef5c2d855 100644 --- a/sdk/dotnet/GetServicePrincipals.cs +++ b/sdk/dotnet/GetServicePrincipals.cs @@ -26,6 +26,14 @@ public static Task InvokeAsync(GetServicePrincipalsA /// public static Output Invoke(GetServicePrincipalsInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getServicePrincipals:getServicePrincipals", args ?? new GetServicePrincipalsInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// Retrieves `application_ids` of all databricks.ServicePrincipal based on their `display_name` + /// + public static Output Invoke(GetServicePrincipalsInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getServicePrincipals:getServicePrincipals", args ?? 
new GetServicePrincipalsInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetShare.cs b/sdk/dotnet/GetShare.cs index 0d54e20dc..79763efc7 100644 --- a/sdk/dotnet/GetShare.cs +++ b/sdk/dotnet/GetShare.cs @@ -86,6 +86,44 @@ public static Task InvokeAsync(GetShareArgs? args = null, Invoke /// public static Output Invoke(GetShareInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getShare:getShare", args ?? new GetShareInvokeArgs(), options.WithDefaults()); + + /// + /// Retrieves details about a databricks.Share that were created by Pulumi or manually. + /// + /// ## Example Usage + /// + /// Getting details of an existing share in the metastore + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @this = Databricks.GetShare.Invoke(new() + /// { + /// Name = "this", + /// }); + /// + /// return new Dictionary<string, object?> + /// { + /// ["createdBy"] = @this.Apply(@this => @this.Apply(getShareResult => getShareResult.CreatedBy)), + /// }; + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * databricks.Share to create Delta Sharing shares. + /// * databricks.Recipient to create Delta Sharing recipients. + /// * databricks.Grants to manage Delta Sharing permissions. + /// + public static Output Invoke(GetShareInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getShare:getShare", args ?? new GetShareInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetShares.cs b/sdk/dotnet/GetShares.cs index 540c15fd5..71c6085f1 100644 --- a/sdk/dotnet/GetShares.cs +++ b/sdk/dotnet/GetShares.cs @@ -80,6 +80,41 @@ public static Task InvokeAsync(GetSharesArgs? 
args = null, Invo /// public static Output Invoke(GetSharesInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getShares:getShares", args ?? new GetSharesInvokeArgs(), options.WithDefaults()); + + /// + /// Retrieves a list of databricks.Share name, that were created by Pulumi or manually. + /// + /// ## Example Usage + /// + /// Getting all existing shares in the metastore + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @this = Databricks.GetShares.Invoke(); + /// + /// return new Dictionary<string, object?> + /// { + /// ["shareName"] = @this.Apply(@this => @this.Apply(getSharesResult => getSharesResult.Shares)), + /// }; + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * databricks.Share to create Delta Sharing shares. + /// * databricks.Recipient to create Delta Sharing recipients. + /// * databricks.Grants to manage Delta Sharing permissions. + /// + public static Output Invoke(GetSharesInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getShares:getShares", args ?? new GetSharesInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetSparkVersion.cs b/sdk/dotnet/GetSparkVersion.cs index 18c153b4d..df34c9ad8 100644 --- a/sdk/dotnet/GetSparkVersion.cs +++ b/sdk/dotnet/GetSparkVersion.cs @@ -130,6 +130,66 @@ public static Task InvokeAsync(GetSparkVersionArgs? args /// public static Output Invoke(GetSparkVersionInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getSparkVersion:getSparkVersion", args ?? 
new GetSparkVersionInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// Gets [Databricks Runtime (DBR)](https://docs.databricks.com/runtime/dbr.html) version that could be used for `spark_version` parameter in databricks.Cluster and other resources that fits search criteria, like specific Spark or Scala version, ML or Genomics runtime, etc., similar to executing `databricks clusters spark-versions`, and filters it to return the latest version that matches criteria. Often used along databricks.getNodeType data source. + /// + /// > **Note** This is experimental functionality, which aims to simplify things. In case of wrong parameters given (e.g. together `ml = true` and `genomics = true`, or something like), data source will throw an error. Similarly, if search returns multiple results, and `latest = false`, data source will throw an error. 
+ /// + /// ## Example Usage + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var withGpu = Databricks.GetNodeType.Invoke(new() + /// { + /// LocalDisk = true, + /// MinCores = 16, + /// GbPerCore = 1, + /// MinGpus = 1, + /// }); + /// + /// var gpuMl = Databricks.GetSparkVersion.Invoke(new() + /// { + /// Gpu = true, + /// Ml = true, + /// }); + /// + /// var research = new Databricks.Cluster("research", new() + /// { + /// ClusterName = "Research Cluster", + /// SparkVersion = gpuMl.Apply(getSparkVersionResult => getSparkVersionResult.Id), + /// NodeTypeId = withGpu.Apply(getNodeTypeResult => getNodeTypeResult.Id), + /// AutoterminationMinutes = 20, + /// Autoscale = new Databricks.Inputs.ClusterAutoscaleArgs + /// { + /// MinWorkers = 1, + /// MaxWorkers = 50, + /// }, + /// }); + /// + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * End to end workspace management guide. + /// * databricks.Cluster to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html). + /// * databricks.ClusterPolicy to create a databricks.Cluster policy, which limits the ability to create clusters based on a set of rules. + /// * databricks.InstancePool to manage [instance pools](https://docs.databricks.com/clusters/instance-pools/index.html) to reduce cluster start and auto-scaling times by maintaining a set of idle, ready-to-use instances. + /// * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster. + /// + public static Output Invoke(GetSparkVersionInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getSparkVersion:getSparkVersion", args ?? 
new GetSparkVersionInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetSqlWarehouse.cs b/sdk/dotnet/GetSqlWarehouse.cs index e5fa78fe9..76245f7a7 100644 --- a/sdk/dotnet/GetSqlWarehouse.cs +++ b/sdk/dotnet/GetSqlWarehouse.cs @@ -120,6 +120,61 @@ public static Task InvokeAsync(GetSqlWarehouseArgs? args /// public static Output Invoke(GetSqlWarehouseInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getSqlWarehouse:getSqlWarehouse", args ?? new GetSqlWarehouseInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// Retrieves information about a databricks.getSqlWarehouse using its id. This could be retrieved programmatically using databricks.getSqlWarehouses data source. 
+ /// + /// ## Example Usage + /// + /// * Retrieve attributes of each SQL warehouses in a workspace: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var all = Databricks.GetSqlWarehouses.Invoke(); + /// + /// var @this = ; + /// + /// }); + /// ``` + /// + /// * Search for a specific SQL Warehouse by name: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var all = Databricks.GetSqlWarehouse.Invoke(new() + /// { + /// Name = "Starter Warehouse", + /// }); + /// + /// }); + /// ``` + /// + /// ## Related resources + /// + /// The following resources are often used in the same context: + /// + /// * End to end workspace management guide. + /// * databricks.InstanceProfile to manage AWS EC2 instance profiles that users can launch databricks.Cluster and access data, like databricks_mount. + /// * databricks.SqlDashboard to manage Databricks SQL [Dashboards](https://docs.databricks.com/sql/user/dashboards/index.html). + /// * databricks.SqlGlobalConfig to configure the security policy, databricks_instance_profile, and [data access properties](https://docs.databricks.com/sql/admin/data-access-configuration.html) for all databricks.getSqlWarehouse of workspace. + /// * databricks.SqlPermissions to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and [more](https://docs.databricks.com/security/access-control/table-acls/object-privileges.html). + /// + public static Output Invoke(GetSqlWarehouseInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getSqlWarehouse:getSqlWarehouse", args ?? 
new GetSqlWarehouseInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetSqlWarehouses.cs b/sdk/dotnet/GetSqlWarehouses.cs index 564532ca5..ce8074c11 100644 --- a/sdk/dotnet/GetSqlWarehouses.cs +++ b/sdk/dotnet/GetSqlWarehouses.cs @@ -116,6 +116,59 @@ public static Task InvokeAsync(GetSqlWarehousesArgs? arg /// public static Output Invoke(GetSqlWarehousesInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getSqlWarehouses:getSqlWarehouses", args ?? new GetSqlWarehousesInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// Retrieves a list of databricks.SqlEndpoint ids, that were created by Pulumi or manually. + /// + /// ## Example Usage + /// + /// Retrieve IDs for all SQL warehouses: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var all = Databricks.GetSqlWarehouses.Invoke(); + /// + /// }); + /// ``` + /// + /// Retrieve IDs for all clusters having "Shared" in the warehouse name: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var allShared = Databricks.GetSqlWarehouses.Invoke(new() + /// { + /// WarehouseNameContains = "shared", + /// }); + /// + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are often used in the same context: + /// + /// * End to end workspace management guide. 
+ /// * databricks.InstanceProfile to manage AWS EC2 instance profiles that users can launch databricks.Cluster and access data, like databricks_mount. + /// * databricks.SqlDashboard to manage Databricks SQL [Dashboards](https://docs.databricks.com/sql/user/dashboards/index.html). + /// * databricks.SqlGlobalConfig to configure the security policy, databricks_instance_profile, and [data access properties](https://docs.databricks.com/sql/admin/data-access-configuration.html) for all databricks.getSqlWarehouse of workspace. + /// * databricks.SqlPermissions to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and [more](https://docs.databricks.com/security/access-control/table-acls/object-privileges.html). + /// + public static Output Invoke(GetSqlWarehousesInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getSqlWarehouses:getSqlWarehouses", args ?? new GetSqlWarehousesInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetStorageCredential.cs b/sdk/dotnet/GetStorageCredential.cs index ae839719d..b586815b7 100644 --- a/sdk/dotnet/GetStorageCredential.cs +++ b/sdk/dotnet/GetStorageCredential.cs @@ -88,6 +88,45 @@ public static Task InvokeAsync(GetStorageCredentialA /// public static Output Invoke(GetStorageCredentialInvokeArgs args, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getStorageCredential:getStorageCredential", args ?? new GetStorageCredentialInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** This data source can only be used with a workspace-level provider! + /// + /// Retrieves details about a databricks.StorageCredential that were created by Pulumi or manually. 
+ /// + /// ## Example Usage + /// + /// Getting details of an existing storage credential in the metastore + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @this = Databricks.GetStorageCredential.Invoke(new() + /// { + /// Name = "this", + /// }); + /// + /// return new Dictionary<string, object?> + /// { + /// ["createdBy"] = @this.Apply(@this => @this.Apply(getStorageCredentialResult => getStorageCredentialResult.StorageCredentialInfo?.CreatedBy)), + /// }; + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * databricks.getStorageCredentials to get names of all credentials + /// * databricks.StorageCredential to manage Storage Credentials within Unity Catalog. + /// + public static Output Invoke(GetStorageCredentialInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getStorageCredential:getStorageCredential", args ?? new GetStorageCredentialInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetStorageCredentials.cs b/sdk/dotnet/GetStorageCredentials.cs index afdce1087..c05903daa 100644 --- a/sdk/dotnet/GetStorageCredentials.cs +++ b/sdk/dotnet/GetStorageCredentials.cs @@ -82,6 +82,42 @@ public static Task InvokeAsync(GetStorageCredential /// public static Output Invoke(GetStorageCredentialsInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getStorageCredentials:getStorageCredentials", args ?? new GetStorageCredentialsInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** This data source can only be used with a workspace-level provider! 
+ /// + /// Retrieves a list of databricks.StorageCredential objects, that were created by Pulumi or manually, so that special handling could be applied. + /// + /// ## Example Usage + /// + /// List all storage credentials in the metastore + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var all = Databricks.GetStorageCredentials.Invoke(); + /// + /// return new Dictionary<string, object?> + /// { + /// ["allStorageCredentials"] = all.Apply(getStorageCredentialsResult => getStorageCredentialsResult.Names), + /// }; + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * databricks.StorageCredential to get information about a single credential + /// * databricks.StorageCredential to manage Storage Credentials within Unity Catalog. + /// + public static Output Invoke(GetStorageCredentialsInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getStorageCredentials:getStorageCredentials", args ?? new GetStorageCredentialsInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetTable.cs b/sdk/dotnet/GetTable.cs index d38921b61..4d7bf167c 100644 --- a/sdk/dotnet/GetTable.cs +++ b/sdk/dotnet/GetTable.cs @@ -118,6 +118,60 @@ public static Task InvokeAsync(GetTableArgs args, InvokeOptions? /// public static Output Invoke(GetTableInvokeArgs args, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getTable:getTable", args ?? new GetTableInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** This data source can only be used with a workspace-level provider! 
+ /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// Retrieves details of a specific table in Unity Catalog, that were created by Pulumi or manually. Use databricks.getTables to retrieve multiple tables in Unity Catalog + /// + /// ## Example Usage + /// + /// Read on a specific table `main.certified.fct_transactions`: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var fctTransactions = Databricks.GetTable.Invoke(new() + /// { + /// Name = "main.certified.fct_transactions", + /// }); + /// + /// var things = new Databricks.Grants("things", new() + /// { + /// Table = fctTransactions.Apply(getTableResult => getTableResult.Name), + /// GrantDetails = new[] + /// { + /// new Databricks.Inputs.GrantsGrantArgs + /// { + /// Principal = "sensitive", + /// Privileges = new[] + /// { + /// "SELECT", + /// "MODIFY", + /// }, + /// }, + /// }, + /// }); + /// + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * databricks.Grant to manage grants within Unity Catalog. + /// * databricks.getTables to list all tables within a schema in Unity Catalog. + /// + public static Output Invoke(GetTableInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getTable:getTable", args ?? 
new GetTableInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetTables.cs b/sdk/dotnet/GetTables.cs index fbd8b9d22..d734be177 100644 --- a/sdk/dotnet/GetTables.cs +++ b/sdk/dotnet/GetTables.cs @@ -128,6 +128,65 @@ public static Task InvokeAsync(GetTablesArgs args, InvokeOption /// public static Output Invoke(GetTablesInvokeArgs args, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getTables:getTables", args ?? new GetTablesInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** This data source can only be used with a workspace-level provider! + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// Retrieves a list of managed or external table full names in Unity Catalog, that were created by Pulumi or manually. Use databricks.getViews for retrieving a list of views. 
+ /// + /// ## Example Usage + /// + /// Granting `SELECT` and `MODIFY` to `sensitive` group on all tables a _things_ databricks.Schema from _sandbox_ databricks_catalog: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using System.Threading.Tasks; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(async() => + /// { + /// var things = await Databricks.GetTables.InvokeAsync(new() + /// { + /// CatalogName = "sandbox", + /// SchemaName = "things", + /// }); + /// + /// var thingsGrants = new List<Databricks.Grants>(); + /// foreach (var range in ) + /// { + /// thingsGrants.Add(new Databricks.Grants($"things-{range.Key}", new() + /// { + /// Table = range.Value, + /// GrantDetails = new[] + /// { + /// new Databricks.Inputs.GrantsGrantArgs + /// { + /// Principal = "sensitive", + /// Privileges = new[] + /// { + /// "SELECT", + /// "MODIFY", + /// }, + /// }, + /// }, + /// })); + /// } + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * databricks.Schema to manage schemas within Unity Catalog. + /// * databricks.Catalog to manage catalogs within Unity Catalog. + /// + public static Output Invoke(GetTablesInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getTables:getTables", args ?? new GetTablesInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetUser.cs b/sdk/dotnet/GetUser.cs index e1ca2a563..254e579d8 100644 --- a/sdk/dotnet/GetUser.cs +++ b/sdk/dotnet/GetUser.cs @@ -116,6 +116,59 @@ public static Task InvokeAsync(GetUserArgs? args = null, InvokeOp /// public static Output Invoke(GetUserInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getUser:getUser", args ?? 
new GetUserInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// Retrieves information about databricks_user. + /// + /// ## Example Usage + /// + /// Adding user to administrative group + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var admins = Databricks.GetGroup.Invoke(new() + /// { + /// DisplayName = "admins", + /// }); + /// + /// var me = Databricks.GetUser.Invoke(new() + /// { + /// UserName = "me@example.com", + /// }); + /// + /// var myMemberA = new Databricks.GroupMember("my_member_a", new() + /// { + /// GroupId = admins.Apply(getGroupResult => getGroupResult.Id), + /// MemberId = me.Apply(getUserResult => getUserResult.Id), + /// }); + /// + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// - End to end workspace management guide. + /// - databricks.getCurrentUser data to retrieve information about databricks.User or databricks_service_principal, that is calling Databricks REST API. + /// - databricks.Group to manage [groups in Databricks Workspace](https://docs.databricks.com/administration-guide/users-groups/groups.html) or [Account Console](https://accounts.cloud.databricks.com/) (for AWS deployments). + /// - databricks.Group data to retrieve information about databricks.Group members, entitlements and instance profiles. + /// - databricks.GroupInstanceProfile to attach databricks.InstanceProfile (AWS) to databricks_group. + /// - databricks.GroupMember to attach users and groups as group members. 
+ /// - databricks.Permissions to manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspace. + /// - databricks.User to [manage users](https://docs.databricks.com/administration-guide/users-groups/users.html), that could be added to databricks.Group within the workspace. + /// - databricks.UserInstanceProfile to attach databricks.InstanceProfile (AWS) to databricks_user. + /// + public static Output Invoke(GetUserInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getUser:getUser", args ?? new GetUserInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetViews.cs b/sdk/dotnet/GetViews.cs index d153a439f..e56c9b2a9 100644 --- a/sdk/dotnet/GetViews.cs +++ b/sdk/dotnet/GetViews.cs @@ -124,6 +124,63 @@ public static Task InvokeAsync(GetViewsArgs args, InvokeOptions? /// public static Output Invoke(GetViewsInvokeArgs args, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getViews:getViews", args ?? new GetViewsInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// Retrieves a list of view full names in Unity Catalog, that were created by Pulumi or manually. Use databricks.getTables for retrieving a list of tables. + /// + /// ## Example Usage + /// + /// Granting `SELECT` and `MODIFY` to `sensitive` group on all views in a _things_ databricks.Schema from _sandbox_ databricks_catalog. 
+ /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using System.Threading.Tasks; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(async() => + /// { + /// var things = await Databricks.GetViews.InvokeAsync(new() + /// { + /// CatalogName = "sandbox", + /// SchemaName = "things", + /// }); + /// + /// var thingsGrants = new List<Databricks.Grants>(); + /// foreach (var range in ) + /// { + /// thingsGrants.Add(new Databricks.Grants($"things-{range.Key}", new() + /// { + /// Table = range.Value, + /// GrantDetails = new[] + /// { + /// new Databricks.Inputs.GrantsGrantArgs + /// { + /// Principal = "sensitive", + /// Privileges = new[] + /// { + /// "SELECT", + /// "MODIFY", + /// }, + /// }, + /// }, + /// })); + /// } + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * databricks.Schema to manage schemas within Unity Catalog. + /// * databricks.Catalog to manage catalogs within Unity Catalog. + /// + public static Output Invoke(GetViewsInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getViews:getViews", args ?? new GetViewsInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetVolume.cs b/sdk/dotnet/GetVolume.cs index ac5551c87..c97606bbd 100644 --- a/sdk/dotnet/GetVolume.cs +++ b/sdk/dotnet/GetVolume.cs @@ -122,6 +122,62 @@ public static Task InvokeAsync(GetVolumeArgs args, InvokeOption /// public static Output Invoke(GetVolumeInvokeArgs args, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getVolume:getVolume", args ?? new GetVolumeInvokeArgs(), options.WithDefaults()); + + /// + /// Retrieves details about databricks.Volume that was created by Pulumi or manually. 
+ /// A volume can be identified by its three-level (fully qualified) name (in the form of: `catalog_name`.`schema_name`.`volume_name`) as input. This can be retrieved programmatically using databricks.getVolumes data source. + /// + /// ## Example Usage + /// + /// * Retrieve details of all volumes in in a _things_ databricks.Schema of a _sandbox_ databricks_catalog: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var all = Databricks.GetVolumes.Invoke(new() + /// { + /// CatalogName = "sandbox", + /// SchemaName = "things", + /// }); + /// + /// var @this = ; + /// + /// }); + /// ``` + /// + /// * Search for a specific volume by its fully qualified name + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @this = Databricks.GetVolume.Invoke(new() + /// { + /// Name = "catalog.schema.volume", + /// }); + /// + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * databricks.Volume to manage volumes within Unity Catalog. + /// * databricks.Schema to manage schemas within Unity Catalog. + /// * databricks.Catalog to manage catalogs within Unity Catalog. + /// + public static Output Invoke(GetVolumeInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getVolume:getVolume", args ?? 
new GetVolumeInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetVolumes.cs b/sdk/dotnet/GetVolumes.cs index 0277d4f59..328e301c6 100644 --- a/sdk/dotnet/GetVolumes.cs +++ b/sdk/dotnet/GetVolumes.cs @@ -100,6 +100,51 @@ public static Task InvokeAsync(GetVolumesArgs args, InvokeOpti /// public static Output Invoke(GetVolumesInvokeArgs args, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getVolumes:getVolumes", args ?? new GetVolumesInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** This data source can only be used with a workspace-level provider! + /// + /// Retrieves a list of databricks.Volume ids (full names), that were created by Pulumi or manually. + /// + /// ## Plugin Framework Migration + /// + /// The volumes data source has been migrated from sdkv2 to plugin framework in version 1.57。 If you encounter any problem with this data source and suspect it is due to the migration, you can fallback to sdkv2 by setting the environment variable in the following way `export USE_SDK_V2_DATA_SOURCES="databricks.getVolumes"`. + /// + /// ## Example Usage + /// + /// Listing all volumes in a _things_ databricks.Schema of a _sandbox_ databricks_catalog: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @this = Databricks.GetVolumes.Invoke(new() + /// { + /// CatalogName = "sandbox", + /// SchemaName = "things", + /// }); + /// + /// return new Dictionary<string, object?> + /// { + /// ["allVolumes"] = @this, + /// }; + /// }); + /// ``` + /// + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * databricks.Volume to manage volumes within Unity Catalog. + /// * databricks.Schema to manage schemas within Unity Catalog. 
+ /// * databricks.Catalog to manage catalogs within Unity Catalog. + /// + public static Output Invoke(GetVolumesInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getVolumes:getVolumes", args ?? new GetVolumesInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/GetZones.cs b/sdk/dotnet/GetZones.cs index 0c1804691..0f6fa1396 100644 --- a/sdk/dotnet/GetZones.cs +++ b/sdk/dotnet/GetZones.cs @@ -56,6 +56,29 @@ public static Task InvokeAsync(GetZonesArgs? args = null, Invoke /// public static Output Invoke(GetZonesInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getZones:getZones", args ?? new GetZonesInvokeArgs(), options.WithDefaults()); + + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + /// + /// This data source allows you to fetch all available AWS availability zones on your workspace on AWS. + /// + /// ## Example Usage + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var zones = Databricks.GetZones.Invoke(); + /// + /// }); + /// ``` + /// + public static Output Invoke(GetZonesInvokeArgs args, InvokeOutputOptions options) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getZones:getZones", args ?? 
new GetZonesInvokeArgs(), options.WithDefaults()); } diff --git a/sdk/dotnet/Utilities.cs b/sdk/dotnet/Utilities.cs index 851bc8f70..6ed604983 100644 --- a/sdk/dotnet/Utilities.cs +++ b/sdk/dotnet/Utilities.cs @@ -56,6 +56,13 @@ static class Utilities return dst; } + public static global::Pulumi.InvokeOutputOptions WithDefaults(this global::Pulumi.InvokeOutputOptions? src) + { + var dst = src ?? new global::Pulumi.InvokeOutputOptions{}; + dst.Version = src?.Version ?? Version; + return dst; + } + private readonly static string version; public static string Version => version; diff --git a/sdk/go/databricks/getAwsAssumeRolePolicy.go b/sdk/go/databricks/getAwsAssumeRolePolicy.go index 86f215abf..25675c6f6 100644 --- a/sdk/go/databricks/getAwsAssumeRolePolicy.go +++ b/sdk/go/databricks/getAwsAssumeRolePolicy.go @@ -121,21 +121,11 @@ type GetAwsAssumeRolePolicyResult struct { } func GetAwsAssumeRolePolicyOutput(ctx *pulumi.Context, args GetAwsAssumeRolePolicyOutputArgs, opts ...pulumi.InvokeOption) GetAwsAssumeRolePolicyResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetAwsAssumeRolePolicyResultOutput, error) { args := v.(GetAwsAssumeRolePolicyArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetAwsAssumeRolePolicyResult - secret, err := ctx.InvokePackageRaw("databricks:index/getAwsAssumeRolePolicy:getAwsAssumeRolePolicy", args, &rv, "", opts...) 
- if err != nil { - return GetAwsAssumeRolePolicyResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetAwsAssumeRolePolicyResultOutput) - if secret { - return pulumi.ToSecret(output).(GetAwsAssumeRolePolicyResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getAwsAssumeRolePolicy:getAwsAssumeRolePolicy", args, GetAwsAssumeRolePolicyResultOutput{}, options).(GetAwsAssumeRolePolicyResultOutput), nil }).(GetAwsAssumeRolePolicyResultOutput) } diff --git a/sdk/go/databricks/getAwsBucketPolicy.go b/sdk/go/databricks/getAwsBucketPolicy.go index b1098cd35..bab213106 100644 --- a/sdk/go/databricks/getAwsBucketPolicy.go +++ b/sdk/go/databricks/getAwsBucketPolicy.go @@ -89,21 +89,11 @@ type GetAwsBucketPolicyResult struct { } func GetAwsBucketPolicyOutput(ctx *pulumi.Context, args GetAwsBucketPolicyOutputArgs, opts ...pulumi.InvokeOption) GetAwsBucketPolicyResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetAwsBucketPolicyResultOutput, error) { args := v.(GetAwsBucketPolicyArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetAwsBucketPolicyResult - secret, err := ctx.InvokePackageRaw("databricks:index/getAwsBucketPolicy:getAwsBucketPolicy", args, &rv, "", opts...) 
- if err != nil { - return GetAwsBucketPolicyResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetAwsBucketPolicyResultOutput) - if secret { - return pulumi.ToSecret(output).(GetAwsBucketPolicyResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getAwsBucketPolicy:getAwsBucketPolicy", args, GetAwsBucketPolicyResultOutput{}, options).(GetAwsBucketPolicyResultOutput), nil }).(GetAwsBucketPolicyResultOutput) } diff --git a/sdk/go/databricks/getAwsCrossAccountPolicy.go b/sdk/go/databricks/getAwsCrossAccountPolicy.go index 512df891e..336a19975 100644 --- a/sdk/go/databricks/getAwsCrossAccountPolicy.go +++ b/sdk/go/databricks/getAwsCrossAccountPolicy.go @@ -91,21 +91,11 @@ type GetAwsCrossAccountPolicyResult struct { } func GetAwsCrossAccountPolicyOutput(ctx *pulumi.Context, args GetAwsCrossAccountPolicyOutputArgs, opts ...pulumi.InvokeOption) GetAwsCrossAccountPolicyResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetAwsCrossAccountPolicyResultOutput, error) { args := v.(GetAwsCrossAccountPolicyArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetAwsCrossAccountPolicyResult - secret, err := ctx.InvokePackageRaw("databricks:index/getAwsCrossAccountPolicy:getAwsCrossAccountPolicy", args, &rv, "", opts...) 
- if err != nil { - return GetAwsCrossAccountPolicyResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetAwsCrossAccountPolicyResultOutput) - if secret { - return pulumi.ToSecret(output).(GetAwsCrossAccountPolicyResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getAwsCrossAccountPolicy:getAwsCrossAccountPolicy", args, GetAwsCrossAccountPolicyResultOutput{}, options).(GetAwsCrossAccountPolicyResultOutput), nil }).(GetAwsCrossAccountPolicyResultOutput) } diff --git a/sdk/go/databricks/getAwsUnityCatalogAssumeRolePolicy.go b/sdk/go/databricks/getAwsUnityCatalogAssumeRolePolicy.go index 575e8b00a..8b9000c9d 100644 --- a/sdk/go/databricks/getAwsUnityCatalogAssumeRolePolicy.go +++ b/sdk/go/databricks/getAwsUnityCatalogAssumeRolePolicy.go @@ -105,21 +105,11 @@ type GetAwsUnityCatalogAssumeRolePolicyResult struct { } func GetAwsUnityCatalogAssumeRolePolicyOutput(ctx *pulumi.Context, args GetAwsUnityCatalogAssumeRolePolicyOutputArgs, opts ...pulumi.InvokeOption) GetAwsUnityCatalogAssumeRolePolicyResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetAwsUnityCatalogAssumeRolePolicyResultOutput, error) { args := v.(GetAwsUnityCatalogAssumeRolePolicyArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetAwsUnityCatalogAssumeRolePolicyResult - secret, err := ctx.InvokePackageRaw("databricks:index/getAwsUnityCatalogAssumeRolePolicy:getAwsUnityCatalogAssumeRolePolicy", args, &rv, "", opts...) 
- if err != nil { - return GetAwsUnityCatalogAssumeRolePolicyResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetAwsUnityCatalogAssumeRolePolicyResultOutput) - if secret { - return pulumi.ToSecret(output).(GetAwsUnityCatalogAssumeRolePolicyResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getAwsUnityCatalogAssumeRolePolicy:getAwsUnityCatalogAssumeRolePolicy", args, GetAwsUnityCatalogAssumeRolePolicyResultOutput{}, options).(GetAwsUnityCatalogAssumeRolePolicyResultOutput), nil }).(GetAwsUnityCatalogAssumeRolePolicyResultOutput) } diff --git a/sdk/go/databricks/getAwsUnityCatalogPolicy.go b/sdk/go/databricks/getAwsUnityCatalogPolicy.go index 93363d95a..2a9a336a8 100644 --- a/sdk/go/databricks/getAwsUnityCatalogPolicy.go +++ b/sdk/go/databricks/getAwsUnityCatalogPolicy.go @@ -106,21 +106,11 @@ type GetAwsUnityCatalogPolicyResult struct { } func GetAwsUnityCatalogPolicyOutput(ctx *pulumi.Context, args GetAwsUnityCatalogPolicyOutputArgs, opts ...pulumi.InvokeOption) GetAwsUnityCatalogPolicyResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetAwsUnityCatalogPolicyResultOutput, error) { args := v.(GetAwsUnityCatalogPolicyArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetAwsUnityCatalogPolicyResult - secret, err := ctx.InvokePackageRaw("databricks:index/getAwsUnityCatalogPolicy:getAwsUnityCatalogPolicy", args, &rv, "", opts...) 
- if err != nil { - return GetAwsUnityCatalogPolicyResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetAwsUnityCatalogPolicyResultOutput) - if secret { - return pulumi.ToSecret(output).(GetAwsUnityCatalogPolicyResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getAwsUnityCatalogPolicy:getAwsUnityCatalogPolicy", args, GetAwsUnityCatalogPolicyResultOutput{}, options).(GetAwsUnityCatalogPolicyResultOutput), nil }).(GetAwsUnityCatalogPolicyResultOutput) } diff --git a/sdk/go/databricks/getCatalog.go b/sdk/go/databricks/getCatalog.go index 93a43058d..615248acd 100644 --- a/sdk/go/databricks/getCatalog.go +++ b/sdk/go/databricks/getCatalog.go @@ -96,21 +96,11 @@ type LookupCatalogResult struct { } func LookupCatalogOutput(ctx *pulumi.Context, args LookupCatalogOutputArgs, opts ...pulumi.InvokeOption) LookupCatalogResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (LookupCatalogResultOutput, error) { args := v.(LookupCatalogArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv LookupCatalogResult - secret, err := ctx.InvokePackageRaw("databricks:index/getCatalog:getCatalog", args, &rv, "", opts...) 
- if err != nil { - return LookupCatalogResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(LookupCatalogResultOutput) - if secret { - return pulumi.ToSecret(output).(LookupCatalogResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getCatalog:getCatalog", args, LookupCatalogResultOutput{}, options).(LookupCatalogResultOutput), nil }).(LookupCatalogResultOutput) } diff --git a/sdk/go/databricks/getCatalogs.go b/sdk/go/databricks/getCatalogs.go index 266e16075..31cacb6a3 100644 --- a/sdk/go/databricks/getCatalogs.go +++ b/sdk/go/databricks/getCatalogs.go @@ -75,21 +75,11 @@ type GetCatalogsResult struct { } func GetCatalogsOutput(ctx *pulumi.Context, args GetCatalogsOutputArgs, opts ...pulumi.InvokeOption) GetCatalogsResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetCatalogsResultOutput, error) { args := v.(GetCatalogsArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetCatalogsResult - secret, err := ctx.InvokePackageRaw("databricks:index/getCatalogs:getCatalogs", args, &rv, "", opts...) 
- if err != nil { - return GetCatalogsResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetCatalogsResultOutput) - if secret { - return pulumi.ToSecret(output).(GetCatalogsResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getCatalogs:getCatalogs", args, GetCatalogsResultOutput{}, options).(GetCatalogsResultOutput), nil }).(GetCatalogsResultOutput) } diff --git a/sdk/go/databricks/getCluster.go b/sdk/go/databricks/getCluster.go index 44201b011..f34ef5913 100644 --- a/sdk/go/databricks/getCluster.go +++ b/sdk/go/databricks/getCluster.go @@ -48,21 +48,11 @@ type LookupClusterResult struct { } func LookupClusterOutput(ctx *pulumi.Context, args LookupClusterOutputArgs, opts ...pulumi.InvokeOption) LookupClusterResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (LookupClusterResultOutput, error) { args := v.(LookupClusterArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv LookupClusterResult - secret, err := ctx.InvokePackageRaw("databricks:index/getCluster:getCluster", args, &rv, "", opts...) 
- if err != nil { - return LookupClusterResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(LookupClusterResultOutput) - if secret { - return pulumi.ToSecret(output).(LookupClusterResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getCluster:getCluster", args, LookupClusterResultOutput{}, options).(LookupClusterResultOutput), nil }).(LookupClusterResultOutput) } diff --git a/sdk/go/databricks/getClusterPolicy.go b/sdk/go/databricks/getClusterPolicy.go index b464390e2..d38ad7d98 100644 --- a/sdk/go/databricks/getClusterPolicy.go +++ b/sdk/go/databricks/getClusterPolicy.go @@ -98,21 +98,11 @@ type LookupClusterPolicyResult struct { } func LookupClusterPolicyOutput(ctx *pulumi.Context, args LookupClusterPolicyOutputArgs, opts ...pulumi.InvokeOption) LookupClusterPolicyResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (LookupClusterPolicyResultOutput, error) { args := v.(LookupClusterPolicyArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv LookupClusterPolicyResult - secret, err := ctx.InvokePackageRaw("databricks:index/getClusterPolicy:getClusterPolicy", args, &rv, "", opts...) 
- if err != nil { - return LookupClusterPolicyResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(LookupClusterPolicyResultOutput) - if secret { - return pulumi.ToSecret(output).(LookupClusterPolicyResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getClusterPolicy:getClusterPolicy", args, LookupClusterPolicyResultOutput{}, options).(LookupClusterPolicyResultOutput), nil }).(LookupClusterPolicyResultOutput) } diff --git a/sdk/go/databricks/getClusters.go b/sdk/go/databricks/getClusters.go index 895302f0c..c7b9c1a3f 100644 --- a/sdk/go/databricks/getClusters.go +++ b/sdk/go/databricks/getClusters.go @@ -109,21 +109,11 @@ type GetClustersResult struct { } func GetClustersOutput(ctx *pulumi.Context, args GetClustersOutputArgs, opts ...pulumi.InvokeOption) GetClustersResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetClustersResultOutput, error) { args := v.(GetClustersArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetClustersResult - secret, err := ctx.InvokePackageRaw("databricks:index/getClusters:getClusters", args, &rv, "", opts...) 
- if err != nil { - return GetClustersResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetClustersResultOutput) - if secret { - return pulumi.ToSecret(output).(GetClustersResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getClusters:getClusters", args, GetClustersResultOutput{}, options).(GetClustersResultOutput), nil }).(GetClustersResultOutput) } diff --git a/sdk/go/databricks/getCurrentConfig.go b/sdk/go/databricks/getCurrentConfig.go index b1f81daa8..d70d66014 100644 --- a/sdk/go/databricks/getCurrentConfig.go +++ b/sdk/go/databricks/getCurrentConfig.go @@ -43,21 +43,11 @@ type GetCurrentConfigResult struct { } func GetCurrentConfigOutput(ctx *pulumi.Context, args GetCurrentConfigOutputArgs, opts ...pulumi.InvokeOption) GetCurrentConfigResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetCurrentConfigResultOutput, error) { args := v.(GetCurrentConfigArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetCurrentConfigResult - secret, err := ctx.InvokePackageRaw("databricks:index/getCurrentConfig:getCurrentConfig", args, &rv, "", opts...) 
- if err != nil { - return GetCurrentConfigResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetCurrentConfigResultOutput) - if secret { - return pulumi.ToSecret(output).(GetCurrentConfigResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getCurrentConfig:getCurrentConfig", args, GetCurrentConfigResultOutput{}, options).(GetCurrentConfigResultOutput), nil }).(GetCurrentConfigResultOutput) } diff --git a/sdk/go/databricks/getCurrentMetastore.go b/sdk/go/databricks/getCurrentMetastore.go index ff097cb3f..193599206 100644 --- a/sdk/go/databricks/getCurrentMetastore.go +++ b/sdk/go/databricks/getCurrentMetastore.go @@ -79,21 +79,11 @@ type GetCurrentMetastoreResult struct { } func GetCurrentMetastoreOutput(ctx *pulumi.Context, args GetCurrentMetastoreOutputArgs, opts ...pulumi.InvokeOption) GetCurrentMetastoreResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetCurrentMetastoreResultOutput, error) { args := v.(GetCurrentMetastoreArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetCurrentMetastoreResult - secret, err := ctx.InvokePackageRaw("databricks:index/getCurrentMetastore:getCurrentMetastore", args, &rv, "", opts...) 
- if err != nil { - return GetCurrentMetastoreResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetCurrentMetastoreResultOutput) - if secret { - return pulumi.ToSecret(output).(GetCurrentMetastoreResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getCurrentMetastore:getCurrentMetastore", args, GetCurrentMetastoreResultOutput{}, options).(GetCurrentMetastoreResultOutput), nil }).(GetCurrentMetastoreResultOutput) } diff --git a/sdk/go/databricks/getCurrentUser.go b/sdk/go/databricks/getCurrentUser.go index 9b8c83c50..a5c76bae7 100644 --- a/sdk/go/databricks/getCurrentUser.go +++ b/sdk/go/databricks/getCurrentUser.go @@ -39,18 +39,8 @@ type GetCurrentUserResult struct { func GetCurrentUserOutput(ctx *pulumi.Context, opts ...pulumi.InvokeOption) GetCurrentUserResultOutput { return pulumi.ToOutput(0).ApplyT(func(int) (GetCurrentUserResultOutput, error) { - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetCurrentUserResult - secret, err := ctx.InvokePackageRaw("databricks:index/getCurrentUser:getCurrentUser", nil, &rv, "", opts...) 
- if err != nil { - return GetCurrentUserResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetCurrentUserResultOutput) - if secret { - return pulumi.ToSecret(output).(GetCurrentUserResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getCurrentUser:getCurrentUser", nil, GetCurrentUserResultOutput{}, options).(GetCurrentUserResultOutput), nil }).(GetCurrentUserResultOutput) } diff --git a/sdk/go/databricks/getDbfsFile.go b/sdk/go/databricks/getDbfsFile.go index cc3a59640..066d88d5f 100644 --- a/sdk/go/databricks/getDbfsFile.go +++ b/sdk/go/databricks/getDbfsFile.go @@ -81,21 +81,11 @@ type LookupDbfsFileResult struct { } func LookupDbfsFileOutput(ctx *pulumi.Context, args LookupDbfsFileOutputArgs, opts ...pulumi.InvokeOption) LookupDbfsFileResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (LookupDbfsFileResultOutput, error) { args := v.(LookupDbfsFileArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv LookupDbfsFileResult - secret, err := ctx.InvokePackageRaw("databricks:index/getDbfsFile:getDbfsFile", args, &rv, "", opts...) 
- if err != nil { - return LookupDbfsFileResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(LookupDbfsFileResultOutput) - if secret { - return pulumi.ToSecret(output).(LookupDbfsFileResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getDbfsFile:getDbfsFile", args, LookupDbfsFileResultOutput{}, options).(LookupDbfsFileResultOutput), nil }).(LookupDbfsFileResultOutput) } diff --git a/sdk/go/databricks/getDbfsFilePaths.go b/sdk/go/databricks/getDbfsFilePaths.go index af71e2046..ee986b2fe 100644 --- a/sdk/go/databricks/getDbfsFilePaths.go +++ b/sdk/go/databricks/getDbfsFilePaths.go @@ -81,21 +81,11 @@ type GetDbfsFilePathsResult struct { } func GetDbfsFilePathsOutput(ctx *pulumi.Context, args GetDbfsFilePathsOutputArgs, opts ...pulumi.InvokeOption) GetDbfsFilePathsResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetDbfsFilePathsResultOutput, error) { args := v.(GetDbfsFilePathsArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetDbfsFilePathsResult - secret, err := ctx.InvokePackageRaw("databricks:index/getDbfsFilePaths:getDbfsFilePaths", args, &rv, "", opts...) 
- if err != nil { - return GetDbfsFilePathsResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetDbfsFilePathsResultOutput) - if secret { - return pulumi.ToSecret(output).(GetDbfsFilePathsResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getDbfsFilePaths:getDbfsFilePaths", args, GetDbfsFilePathsResultOutput{}, options).(GetDbfsFilePathsResultOutput), nil }).(GetDbfsFilePathsResultOutput) } diff --git a/sdk/go/databricks/getDirectory.go b/sdk/go/databricks/getDirectory.go index 88e0aa998..f2fd08c0d 100644 --- a/sdk/go/databricks/getDirectory.go +++ b/sdk/go/databricks/getDirectory.go @@ -72,21 +72,11 @@ type LookupDirectoryResult struct { } func LookupDirectoryOutput(ctx *pulumi.Context, args LookupDirectoryOutputArgs, opts ...pulumi.InvokeOption) LookupDirectoryResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (LookupDirectoryResultOutput, error) { args := v.(LookupDirectoryArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv LookupDirectoryResult - secret, err := ctx.InvokePackageRaw("databricks:index/getDirectory:getDirectory", args, &rv, "", opts...) 
- if err != nil { - return LookupDirectoryResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(LookupDirectoryResultOutput) - if secret { - return pulumi.ToSecret(output).(LookupDirectoryResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getDirectory:getDirectory", args, LookupDirectoryResultOutput{}, options).(LookupDirectoryResultOutput), nil }).(LookupDirectoryResultOutput) } diff --git a/sdk/go/databricks/getExternalLocation.go b/sdk/go/databricks/getExternalLocation.go index b146e2848..e11172167 100644 --- a/sdk/go/databricks/getExternalLocation.go +++ b/sdk/go/databricks/getExternalLocation.go @@ -80,21 +80,11 @@ type LookupExternalLocationResult struct { } func LookupExternalLocationOutput(ctx *pulumi.Context, args LookupExternalLocationOutputArgs, opts ...pulumi.InvokeOption) LookupExternalLocationResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (LookupExternalLocationResultOutput, error) { args := v.(LookupExternalLocationArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv LookupExternalLocationResult - secret, err := ctx.InvokePackageRaw("databricks:index/getExternalLocation:getExternalLocation", args, &rv, "", opts...) 
- if err != nil { - return LookupExternalLocationResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(LookupExternalLocationResultOutput) - if secret { - return pulumi.ToSecret(output).(LookupExternalLocationResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getExternalLocation:getExternalLocation", args, LookupExternalLocationResultOutput{}, options).(LookupExternalLocationResultOutput), nil }).(LookupExternalLocationResultOutput) } diff --git a/sdk/go/databricks/getExternalLocations.go b/sdk/go/databricks/getExternalLocations.go index 16b6a5eda..f798f39a0 100644 --- a/sdk/go/databricks/getExternalLocations.go +++ b/sdk/go/databricks/getExternalLocations.go @@ -73,21 +73,11 @@ type GetExternalLocationsResult struct { } func GetExternalLocationsOutput(ctx *pulumi.Context, args GetExternalLocationsOutputArgs, opts ...pulumi.InvokeOption) GetExternalLocationsResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetExternalLocationsResultOutput, error) { args := v.(GetExternalLocationsArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetExternalLocationsResult - secret, err := ctx.InvokePackageRaw("databricks:index/getExternalLocations:getExternalLocations", args, &rv, "", opts...) 
- if err != nil { - return GetExternalLocationsResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetExternalLocationsResultOutput) - if secret { - return pulumi.ToSecret(output).(GetExternalLocationsResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getExternalLocations:getExternalLocations", args, GetExternalLocationsResultOutput{}, options).(GetExternalLocationsResultOutput), nil }).(GetExternalLocationsResultOutput) } diff --git a/sdk/go/databricks/getFunctions.go b/sdk/go/databricks/getFunctions.go index 7f5003038..97811f590 100644 --- a/sdk/go/databricks/getFunctions.go +++ b/sdk/go/databricks/getFunctions.go @@ -86,21 +86,11 @@ type GetFunctionsResult struct { } func GetFunctionsOutput(ctx *pulumi.Context, args GetFunctionsOutputArgs, opts ...pulumi.InvokeOption) GetFunctionsResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetFunctionsResultOutput, error) { args := v.(GetFunctionsArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetFunctionsResult - secret, err := ctx.InvokePackageRaw("databricks:index/getFunctions:getFunctions", args, &rv, "", opts...) 
- if err != nil { - return GetFunctionsResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetFunctionsResultOutput) - if secret { - return pulumi.ToSecret(output).(GetFunctionsResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getFunctions:getFunctions", args, GetFunctionsResultOutput{}, options).(GetFunctionsResultOutput), nil }).(GetFunctionsResultOutput) } diff --git a/sdk/go/databricks/getGroup.go b/sdk/go/databricks/getGroup.go index f684f2bbb..d4a54812d 100644 --- a/sdk/go/databricks/getGroup.go +++ b/sdk/go/databricks/getGroup.go @@ -137,21 +137,11 @@ type LookupGroupResult struct { } func LookupGroupOutput(ctx *pulumi.Context, args LookupGroupOutputArgs, opts ...pulumi.InvokeOption) LookupGroupResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (LookupGroupResultOutput, error) { args := v.(LookupGroupArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv LookupGroupResult - secret, err := ctx.InvokePackageRaw("databricks:index/getGroup:getGroup", args, &rv, "", opts...) 
- if err != nil { - return LookupGroupResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(LookupGroupResultOutput) - if secret { - return pulumi.ToSecret(output).(LookupGroupResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getGroup:getGroup", args, LookupGroupResultOutput{}, options).(LookupGroupResultOutput), nil }).(LookupGroupResultOutput) } diff --git a/sdk/go/databricks/getInstancePool.go b/sdk/go/databricks/getInstancePool.go index 628729c19..74f07179b 100644 --- a/sdk/go/databricks/getInstancePool.go +++ b/sdk/go/databricks/getInstancePool.go @@ -76,21 +76,11 @@ type LookupInstancePoolResult struct { } func LookupInstancePoolOutput(ctx *pulumi.Context, args LookupInstancePoolOutputArgs, opts ...pulumi.InvokeOption) LookupInstancePoolResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (LookupInstancePoolResultOutput, error) { args := v.(LookupInstancePoolArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv LookupInstancePoolResult - secret, err := ctx.InvokePackageRaw("databricks:index/getInstancePool:getInstancePool", args, &rv, "", opts...) 
- if err != nil { - return LookupInstancePoolResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(LookupInstancePoolResultOutput) - if secret { - return pulumi.ToSecret(output).(LookupInstancePoolResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getInstancePool:getInstancePool", args, LookupInstancePoolResultOutput{}, options).(LookupInstancePoolResultOutput), nil }).(LookupInstancePoolResultOutput) } diff --git a/sdk/go/databricks/getInstanceProfiles.go b/sdk/go/databricks/getInstanceProfiles.go index dafc91466..1d8c99d59 100644 --- a/sdk/go/databricks/getInstanceProfiles.go +++ b/sdk/go/databricks/getInstanceProfiles.go @@ -64,21 +64,11 @@ type GetInstanceProfilesResult struct { } func GetInstanceProfilesOutput(ctx *pulumi.Context, args GetInstanceProfilesOutputArgs, opts ...pulumi.InvokeOption) GetInstanceProfilesResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetInstanceProfilesResultOutput, error) { args := v.(GetInstanceProfilesArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetInstanceProfilesResult - secret, err := ctx.InvokePackageRaw("databricks:index/getInstanceProfiles:getInstanceProfiles", args, &rv, "", opts...) 
- if err != nil { - return GetInstanceProfilesResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetInstanceProfilesResultOutput) - if secret { - return pulumi.ToSecret(output).(GetInstanceProfilesResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getInstanceProfiles:getInstanceProfiles", args, GetInstanceProfilesResultOutput{}, options).(GetInstanceProfilesResultOutput), nil }).(GetInstanceProfilesResultOutput) } diff --git a/sdk/go/databricks/getJob.go b/sdk/go/databricks/getJob.go index c959daf48..c5917aaa6 100644 --- a/sdk/go/databricks/getJob.go +++ b/sdk/go/databricks/getJob.go @@ -85,21 +85,11 @@ type LookupJobResult struct { } func LookupJobOutput(ctx *pulumi.Context, args LookupJobOutputArgs, opts ...pulumi.InvokeOption) LookupJobResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (LookupJobResultOutput, error) { args := v.(LookupJobArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv LookupJobResult - secret, err := ctx.InvokePackageRaw("databricks:index/getJob:getJob", args, &rv, "", opts...) 
- if err != nil { - return LookupJobResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(LookupJobResultOutput) - if secret { - return pulumi.ToSecret(output).(LookupJobResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getJob:getJob", args, LookupJobResultOutput{}, options).(LookupJobResultOutput), nil }).(LookupJobResultOutput) } diff --git a/sdk/go/databricks/getJobs.go b/sdk/go/databricks/getJobs.go index 2fa877c15..d3a8bfa3b 100644 --- a/sdk/go/databricks/getJobs.go +++ b/sdk/go/databricks/getJobs.go @@ -116,21 +116,11 @@ type GetJobsResult struct { } func GetJobsOutput(ctx *pulumi.Context, args GetJobsOutputArgs, opts ...pulumi.InvokeOption) GetJobsResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetJobsResultOutput, error) { args := v.(GetJobsArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetJobsResult - secret, err := ctx.InvokePackageRaw("databricks:index/getJobs:getJobs", args, &rv, "", opts...) 
- if err != nil { - return GetJobsResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetJobsResultOutput) - if secret { - return pulumi.ToSecret(output).(GetJobsResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getJobs:getJobs", args, GetJobsResultOutput{}, options).(GetJobsResultOutput), nil }).(GetJobsResultOutput) } diff --git a/sdk/go/databricks/getMetastore.go b/sdk/go/databricks/getMetastore.go index 00a3aa3a1..727c18d9c 100644 --- a/sdk/go/databricks/getMetastore.go +++ b/sdk/go/databricks/getMetastore.go @@ -110,21 +110,11 @@ type LookupMetastoreResult struct { } func LookupMetastoreOutput(ctx *pulumi.Context, args LookupMetastoreOutputArgs, opts ...pulumi.InvokeOption) LookupMetastoreResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (LookupMetastoreResultOutput, error) { args := v.(LookupMetastoreArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv LookupMetastoreResult - secret, err := ctx.InvokePackageRaw("databricks:index/getMetastore:getMetastore", args, &rv, "", opts...) 
- if err != nil { - return LookupMetastoreResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(LookupMetastoreResultOutput) - if secret { - return pulumi.ToSecret(output).(LookupMetastoreResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getMetastore:getMetastore", args, LookupMetastoreResultOutput{}, options).(LookupMetastoreResultOutput), nil }).(LookupMetastoreResultOutput) } diff --git a/sdk/go/databricks/getMetastores.go b/sdk/go/databricks/getMetastores.go index 3ee430edd..3272d3582 100644 --- a/sdk/go/databricks/getMetastores.go +++ b/sdk/go/databricks/getMetastores.go @@ -76,21 +76,11 @@ type GetMetastoresResult struct { } func GetMetastoresOutput(ctx *pulumi.Context, args GetMetastoresOutputArgs, opts ...pulumi.InvokeOption) GetMetastoresResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetMetastoresResultOutput, error) { args := v.(GetMetastoresArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetMetastoresResult - secret, err := ctx.InvokePackageRaw("databricks:index/getMetastores:getMetastores", args, &rv, "", opts...) 
- if err != nil { - return GetMetastoresResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetMetastoresResultOutput) - if secret { - return pulumi.ToSecret(output).(GetMetastoresResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getMetastores:getMetastores", args, GetMetastoresResultOutput{}, options).(GetMetastoresResultOutput), nil }).(GetMetastoresResultOutput) } diff --git a/sdk/go/databricks/getMlflowExperiment.go b/sdk/go/databricks/getMlflowExperiment.go index 91eb09d47..686721a74 100644 --- a/sdk/go/databricks/getMlflowExperiment.go +++ b/sdk/go/databricks/getMlflowExperiment.go @@ -65,21 +65,11 @@ type LookupMlflowExperimentResult struct { } func LookupMlflowExperimentOutput(ctx *pulumi.Context, args LookupMlflowExperimentOutputArgs, opts ...pulumi.InvokeOption) LookupMlflowExperimentResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (LookupMlflowExperimentResultOutput, error) { args := v.(LookupMlflowExperimentArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv LookupMlflowExperimentResult - secret, err := ctx.InvokePackageRaw("databricks:index/getMlflowExperiment:getMlflowExperiment", args, &rv, "", opts...) 
- if err != nil { - return LookupMlflowExperimentResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(LookupMlflowExperimentResultOutput) - if secret { - return pulumi.ToSecret(output).(LookupMlflowExperimentResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getMlflowExperiment:getMlflowExperiment", args, LookupMlflowExperimentResultOutput{}, options).(LookupMlflowExperimentResultOutput), nil }).(LookupMlflowExperimentResultOutput) } diff --git a/sdk/go/databricks/getMlflowModel.go b/sdk/go/databricks/getMlflowModel.go index 8e8c15197..9bdd09831 100644 --- a/sdk/go/databricks/getMlflowModel.go +++ b/sdk/go/databricks/getMlflowModel.go @@ -144,21 +144,11 @@ type LookupMlflowModelResult struct { } func LookupMlflowModelOutput(ctx *pulumi.Context, args LookupMlflowModelOutputArgs, opts ...pulumi.InvokeOption) LookupMlflowModelResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (LookupMlflowModelResultOutput, error) { args := v.(LookupMlflowModelArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv LookupMlflowModelResult - secret, err := ctx.InvokePackageRaw("databricks:index/getMlflowModel:getMlflowModel", args, &rv, "", opts...) 
- if err != nil { - return LookupMlflowModelResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(LookupMlflowModelResultOutput) - if secret { - return pulumi.ToSecret(output).(LookupMlflowModelResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getMlflowModel:getMlflowModel", args, LookupMlflowModelResultOutput{}, options).(LookupMlflowModelResultOutput), nil }).(LookupMlflowModelResultOutput) } diff --git a/sdk/go/databricks/getMlflowModels.go b/sdk/go/databricks/getMlflowModels.go index 07e4547ee..0071e7a6d 100644 --- a/sdk/go/databricks/getMlflowModels.go +++ b/sdk/go/databricks/getMlflowModels.go @@ -64,21 +64,11 @@ type GetMlflowModelsResult struct { } func GetMlflowModelsOutput(ctx *pulumi.Context, args GetMlflowModelsOutputArgs, opts ...pulumi.InvokeOption) GetMlflowModelsResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetMlflowModelsResultOutput, error) { args := v.(GetMlflowModelsArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetMlflowModelsResult - secret, err := ctx.InvokePackageRaw("databricks:index/getMlflowModels:getMlflowModels", args, &rv, "", opts...) 
- if err != nil { - return GetMlflowModelsResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetMlflowModelsResultOutput) - if secret { - return pulumi.ToSecret(output).(GetMlflowModelsResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getMlflowModels:getMlflowModels", args, GetMlflowModelsResultOutput{}, options).(GetMlflowModelsResultOutput), nil }).(GetMlflowModelsResultOutput) } diff --git a/sdk/go/databricks/getMwsCredentials.go b/sdk/go/databricks/getMwsCredentials.go index 7cd524472..814bd759c 100644 --- a/sdk/go/databricks/getMwsCredentials.go +++ b/sdk/go/databricks/getMwsCredentials.go @@ -79,21 +79,11 @@ type LookupMwsCredentialsResult struct { } func LookupMwsCredentialsOutput(ctx *pulumi.Context, args LookupMwsCredentialsOutputArgs, opts ...pulumi.InvokeOption) LookupMwsCredentialsResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (LookupMwsCredentialsResultOutput, error) { args := v.(LookupMwsCredentialsArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv LookupMwsCredentialsResult - secret, err := ctx.InvokePackageRaw("databricks:index/getMwsCredentials:getMwsCredentials", args, &rv, "", opts...) 
- if err != nil { - return LookupMwsCredentialsResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(LookupMwsCredentialsResultOutput) - if secret { - return pulumi.ToSecret(output).(LookupMwsCredentialsResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getMwsCredentials:getMwsCredentials", args, LookupMwsCredentialsResultOutput{}, options).(LookupMwsCredentialsResultOutput), nil }).(LookupMwsCredentialsResultOutput) } diff --git a/sdk/go/databricks/getMwsWorkspaces.go b/sdk/go/databricks/getMwsWorkspaces.go index 0bf0dd91b..e4f0e7916 100644 --- a/sdk/go/databricks/getMwsWorkspaces.go +++ b/sdk/go/databricks/getMwsWorkspaces.go @@ -75,21 +75,11 @@ type LookupMwsWorkspacesResult struct { } func LookupMwsWorkspacesOutput(ctx *pulumi.Context, args LookupMwsWorkspacesOutputArgs, opts ...pulumi.InvokeOption) LookupMwsWorkspacesResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (LookupMwsWorkspacesResultOutput, error) { args := v.(LookupMwsWorkspacesArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv LookupMwsWorkspacesResult - secret, err := ctx.InvokePackageRaw("databricks:index/getMwsWorkspaces:getMwsWorkspaces", args, &rv, "", opts...) 
- if err != nil { - return LookupMwsWorkspacesResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(LookupMwsWorkspacesResultOutput) - if secret { - return pulumi.ToSecret(output).(LookupMwsWorkspacesResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getMwsWorkspaces:getMwsWorkspaces", args, LookupMwsWorkspacesResultOutput{}, options).(LookupMwsWorkspacesResultOutput), nil }).(LookupMwsWorkspacesResultOutput) } diff --git a/sdk/go/databricks/getNodeType.go b/sdk/go/databricks/getNodeType.go index ceaf871ff..3a2820f47 100644 --- a/sdk/go/databricks/getNodeType.go +++ b/sdk/go/databricks/getNodeType.go @@ -144,21 +144,11 @@ type GetNodeTypeResult struct { } func GetNodeTypeOutput(ctx *pulumi.Context, args GetNodeTypeOutputArgs, opts ...pulumi.InvokeOption) GetNodeTypeResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetNodeTypeResultOutput, error) { args := v.(GetNodeTypeArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetNodeTypeResult - secret, err := ctx.InvokePackageRaw("databricks:index/getNodeType:getNodeType", args, &rv, "", opts...) 
- if err != nil { - return GetNodeTypeResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetNodeTypeResultOutput) - if secret { - return pulumi.ToSecret(output).(GetNodeTypeResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getNodeType:getNodeType", args, GetNodeTypeResultOutput{}, options).(GetNodeTypeResultOutput), nil }).(GetNodeTypeResultOutput) } diff --git a/sdk/go/databricks/getNotebook.go b/sdk/go/databricks/getNotebook.go index 3dda3a89e..369e79583 100644 --- a/sdk/go/databricks/getNotebook.go +++ b/sdk/go/databricks/getNotebook.go @@ -84,21 +84,11 @@ type LookupNotebookResult struct { } func LookupNotebookOutput(ctx *pulumi.Context, args LookupNotebookOutputArgs, opts ...pulumi.InvokeOption) LookupNotebookResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (LookupNotebookResultOutput, error) { args := v.(LookupNotebookArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv LookupNotebookResult - secret, err := ctx.InvokePackageRaw("databricks:index/getNotebook:getNotebook", args, &rv, "", opts...) 
- if err != nil { - return LookupNotebookResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(LookupNotebookResultOutput) - if secret { - return pulumi.ToSecret(output).(LookupNotebookResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getNotebook:getNotebook", args, LookupNotebookResultOutput{}, options).(LookupNotebookResultOutput), nil }).(LookupNotebookResultOutput) } diff --git a/sdk/go/databricks/getNotebookPaths.go b/sdk/go/databricks/getNotebookPaths.go index 371973ffb..c8c4ed531 100644 --- a/sdk/go/databricks/getNotebookPaths.go +++ b/sdk/go/databricks/getNotebookPaths.go @@ -70,21 +70,11 @@ type GetNotebookPathsResult struct { } func GetNotebookPathsOutput(ctx *pulumi.Context, args GetNotebookPathsOutputArgs, opts ...pulumi.InvokeOption) GetNotebookPathsResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetNotebookPathsResultOutput, error) { args := v.(GetNotebookPathsArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetNotebookPathsResult - secret, err := ctx.InvokePackageRaw("databricks:index/getNotebookPaths:getNotebookPaths", args, &rv, "", opts...) 
- if err != nil { - return GetNotebookPathsResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetNotebookPathsResultOutput) - if secret { - return pulumi.ToSecret(output).(GetNotebookPathsResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getNotebookPaths:getNotebookPaths", args, GetNotebookPathsResultOutput{}, options).(GetNotebookPathsResultOutput), nil }).(GetNotebookPathsResultOutput) } diff --git a/sdk/go/databricks/getNotificationDestinations.go b/sdk/go/databricks/getNotificationDestinations.go index fe3d531d8..ceda741e8 100644 --- a/sdk/go/databricks/getNotificationDestinations.go +++ b/sdk/go/databricks/getNotificationDestinations.go @@ -105,21 +105,11 @@ type GetNotificationDestinationsResult struct { } func GetNotificationDestinationsOutput(ctx *pulumi.Context, args GetNotificationDestinationsOutputArgs, opts ...pulumi.InvokeOption) GetNotificationDestinationsResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetNotificationDestinationsResultOutput, error) { args := v.(GetNotificationDestinationsArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetNotificationDestinationsResult - secret, err := ctx.InvokePackageRaw("databricks:index/getNotificationDestinations:getNotificationDestinations", args, &rv, "", opts...) 
- if err != nil { - return GetNotificationDestinationsResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetNotificationDestinationsResultOutput) - if secret { - return pulumi.ToSecret(output).(GetNotificationDestinationsResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getNotificationDestinations:getNotificationDestinations", args, GetNotificationDestinationsResultOutput{}, options).(GetNotificationDestinationsResultOutput), nil }).(GetNotificationDestinationsResultOutput) } diff --git a/sdk/go/databricks/getPipelines.go b/sdk/go/databricks/getPipelines.go index f8594ba0d..4bf8f00c1 100644 --- a/sdk/go/databricks/getPipelines.go +++ b/sdk/go/databricks/getPipelines.go @@ -133,21 +133,11 @@ type GetPipelinesResult struct { } func GetPipelinesOutput(ctx *pulumi.Context, args GetPipelinesOutputArgs, opts ...pulumi.InvokeOption) GetPipelinesResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetPipelinesResultOutput, error) { args := v.(GetPipelinesArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetPipelinesResult - secret, err := ctx.InvokePackageRaw("databricks:index/getPipelines:getPipelines", args, &rv, "", opts...) 
- if err != nil { - return GetPipelinesResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetPipelinesResultOutput) - if secret { - return pulumi.ToSecret(output).(GetPipelinesResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getPipelines:getPipelines", args, GetPipelinesResultOutput{}, options).(GetPipelinesResultOutput), nil }).(GetPipelinesResultOutput) } diff --git a/sdk/go/databricks/getRegisteredModel.go b/sdk/go/databricks/getRegisteredModel.go index ec053184d..0cfb66ab8 100644 --- a/sdk/go/databricks/getRegisteredModel.go +++ b/sdk/go/databricks/getRegisteredModel.go @@ -83,21 +83,11 @@ type LookupRegisteredModelResult struct { } func LookupRegisteredModelOutput(ctx *pulumi.Context, args LookupRegisteredModelOutputArgs, opts ...pulumi.InvokeOption) LookupRegisteredModelResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (LookupRegisteredModelResultOutput, error) { args := v.(LookupRegisteredModelArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv LookupRegisteredModelResult - secret, err := ctx.InvokePackageRaw("databricks:index/getRegisteredModel:getRegisteredModel", args, &rv, "", opts...) 
- if err != nil { - return LookupRegisteredModelResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(LookupRegisteredModelResultOutput) - if secret { - return pulumi.ToSecret(output).(LookupRegisteredModelResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getRegisteredModel:getRegisteredModel", args, LookupRegisteredModelResultOutput{}, options).(LookupRegisteredModelResultOutput), nil }).(LookupRegisteredModelResultOutput) } diff --git a/sdk/go/databricks/getSchema.go b/sdk/go/databricks/getSchema.go index f23b6c5ca..bec67d2ea 100644 --- a/sdk/go/databricks/getSchema.go +++ b/sdk/go/databricks/getSchema.go @@ -44,21 +44,11 @@ type LookupSchemaResult struct { } func LookupSchemaOutput(ctx *pulumi.Context, args LookupSchemaOutputArgs, opts ...pulumi.InvokeOption) LookupSchemaResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (LookupSchemaResultOutput, error) { args := v.(LookupSchemaArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv LookupSchemaResult - secret, err := ctx.InvokePackageRaw("databricks:index/getSchema:getSchema", args, &rv, "", opts...) 
- if err != nil { - return LookupSchemaResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(LookupSchemaResultOutput) - if secret { - return pulumi.ToSecret(output).(LookupSchemaResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getSchema:getSchema", args, LookupSchemaResultOutput{}, options).(LookupSchemaResultOutput), nil }).(LookupSchemaResultOutput) } diff --git a/sdk/go/databricks/getSchemas.go b/sdk/go/databricks/getSchemas.go index 5b23120f1..5ccae25d1 100644 --- a/sdk/go/databricks/getSchemas.go +++ b/sdk/go/databricks/getSchemas.go @@ -80,21 +80,11 @@ type GetSchemasResult struct { } func GetSchemasOutput(ctx *pulumi.Context, args GetSchemasOutputArgs, opts ...pulumi.InvokeOption) GetSchemasResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetSchemasResultOutput, error) { args := v.(GetSchemasArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetSchemasResult - secret, err := ctx.InvokePackageRaw("databricks:index/getSchemas:getSchemas", args, &rv, "", opts...) 
- if err != nil { - return GetSchemasResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetSchemasResultOutput) - if secret { - return pulumi.ToSecret(output).(GetSchemasResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getSchemas:getSchemas", args, GetSchemasResultOutput{}, options).(GetSchemasResultOutput), nil }).(GetSchemasResultOutput) } diff --git a/sdk/go/databricks/getServicePrincipal.go b/sdk/go/databricks/getServicePrincipal.go index 25185503b..29aa1c599 100644 --- a/sdk/go/databricks/getServicePrincipal.go +++ b/sdk/go/databricks/getServicePrincipal.go @@ -120,21 +120,11 @@ type LookupServicePrincipalResult struct { } func LookupServicePrincipalOutput(ctx *pulumi.Context, args LookupServicePrincipalOutputArgs, opts ...pulumi.InvokeOption) LookupServicePrincipalResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (LookupServicePrincipalResultOutput, error) { args := v.(LookupServicePrincipalArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv LookupServicePrincipalResult - secret, err := ctx.InvokePackageRaw("databricks:index/getServicePrincipal:getServicePrincipal", args, &rv, "", opts...) 
- if err != nil { - return LookupServicePrincipalResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(LookupServicePrincipalResultOutput) - if secret { - return pulumi.ToSecret(output).(LookupServicePrincipalResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getServicePrincipal:getServicePrincipal", args, LookupServicePrincipalResultOutput{}, options).(LookupServicePrincipalResultOutput), nil }).(LookupServicePrincipalResultOutput) } diff --git a/sdk/go/databricks/getServicePrincipals.go b/sdk/go/databricks/getServicePrincipals.go index 0f573fec7..3c4151513 100644 --- a/sdk/go/databricks/getServicePrincipals.go +++ b/sdk/go/databricks/getServicePrincipals.go @@ -42,21 +42,11 @@ type GetServicePrincipalsResult struct { } func GetServicePrincipalsOutput(ctx *pulumi.Context, args GetServicePrincipalsOutputArgs, opts ...pulumi.InvokeOption) GetServicePrincipalsResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetServicePrincipalsResultOutput, error) { args := v.(GetServicePrincipalsArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetServicePrincipalsResult - secret, err := ctx.InvokePackageRaw("databricks:index/getServicePrincipals:getServicePrincipals", args, &rv, "", opts...) 
- if err != nil { - return GetServicePrincipalsResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetServicePrincipalsResultOutput) - if secret { - return pulumi.ToSecret(output).(GetServicePrincipalsResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getServicePrincipals:getServicePrincipals", args, GetServicePrincipalsResultOutput{}, options).(GetServicePrincipalsResultOutput), nil }).(GetServicePrincipalsResultOutput) } diff --git a/sdk/go/databricks/getShare.go b/sdk/go/databricks/getShare.go index 4fb5a84f0..be4e1f928 100644 --- a/sdk/go/databricks/getShare.go +++ b/sdk/go/databricks/getShare.go @@ -86,21 +86,11 @@ type LookupShareResult struct { } func LookupShareOutput(ctx *pulumi.Context, args LookupShareOutputArgs, opts ...pulumi.InvokeOption) LookupShareResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (LookupShareResultOutput, error) { args := v.(LookupShareArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv LookupShareResult - secret, err := ctx.InvokePackageRaw("databricks:index/getShare:getShare", args, &rv, "", opts...) 
- if err != nil { - return LookupShareResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(LookupShareResultOutput) - if secret { - return pulumi.ToSecret(output).(LookupShareResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getShare:getShare", args, LookupShareResultOutput{}, options).(LookupShareResultOutput), nil }).(LookupShareResultOutput) } diff --git a/sdk/go/databricks/getShares.go b/sdk/go/databricks/getShares.go index 44c9c527b..f905111ca 100644 --- a/sdk/go/databricks/getShares.go +++ b/sdk/go/databricks/getShares.go @@ -72,21 +72,11 @@ type GetSharesResult struct { } func GetSharesOutput(ctx *pulumi.Context, args GetSharesOutputArgs, opts ...pulumi.InvokeOption) GetSharesResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetSharesResultOutput, error) { args := v.(GetSharesArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetSharesResult - secret, err := ctx.InvokePackageRaw("databricks:index/getShares:getShares", args, &rv, "", opts...) 
- if err != nil { - return GetSharesResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetSharesResultOutput) - if secret { - return pulumi.ToSecret(output).(GetSharesResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getShares:getShares", args, GetSharesResultOutput{}, options).(GetSharesResultOutput), nil }).(GetSharesResultOutput) } diff --git a/sdk/go/databricks/getSparkVersion.go b/sdk/go/databricks/getSparkVersion.go index 2da326934..a4d7cf7c9 100644 --- a/sdk/go/databricks/getSparkVersion.go +++ b/sdk/go/databricks/getSparkVersion.go @@ -134,21 +134,11 @@ type GetSparkVersionResult struct { } func GetSparkVersionOutput(ctx *pulumi.Context, args GetSparkVersionOutputArgs, opts ...pulumi.InvokeOption) GetSparkVersionResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetSparkVersionResultOutput, error) { args := v.(GetSparkVersionArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetSparkVersionResult - secret, err := ctx.InvokePackageRaw("databricks:index/getSparkVersion:getSparkVersion", args, &rv, "", opts...) 
- if err != nil { - return GetSparkVersionResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetSparkVersionResultOutput) - if secret { - return pulumi.ToSecret(output).(GetSparkVersionResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getSparkVersion:getSparkVersion", args, GetSparkVersionResultOutput{}, options).(GetSparkVersionResultOutput), nil }).(GetSparkVersionResultOutput) } diff --git a/sdk/go/databricks/getSqlWarehouse.go b/sdk/go/databricks/getSqlWarehouse.go index e2b988654..d2bb256ad 100644 --- a/sdk/go/databricks/getSqlWarehouse.go +++ b/sdk/go/databricks/getSqlWarehouse.go @@ -115,21 +115,11 @@ type GetSqlWarehouseResult struct { } func GetSqlWarehouseOutput(ctx *pulumi.Context, args GetSqlWarehouseOutputArgs, opts ...pulumi.InvokeOption) GetSqlWarehouseResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetSqlWarehouseResultOutput, error) { args := v.(GetSqlWarehouseArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetSqlWarehouseResult - secret, err := ctx.InvokePackageRaw("databricks:index/getSqlWarehouse:getSqlWarehouse", args, &rv, "", opts...) 
- if err != nil { - return GetSqlWarehouseResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetSqlWarehouseResultOutput) - if secret { - return pulumi.ToSecret(output).(GetSqlWarehouseResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getSqlWarehouse:getSqlWarehouse", args, GetSqlWarehouseResultOutput{}, options).(GetSqlWarehouseResultOutput), nil }).(GetSqlWarehouseResultOutput) } diff --git a/sdk/go/databricks/getSqlWarehouses.go b/sdk/go/databricks/getSqlWarehouses.go index d1552be60..c7cdbc64c 100644 --- a/sdk/go/databricks/getSqlWarehouses.go +++ b/sdk/go/databricks/getSqlWarehouses.go @@ -104,21 +104,11 @@ type GetSqlWarehousesResult struct { } func GetSqlWarehousesOutput(ctx *pulumi.Context, args GetSqlWarehousesOutputArgs, opts ...pulumi.InvokeOption) GetSqlWarehousesResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetSqlWarehousesResultOutput, error) { args := v.(GetSqlWarehousesArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetSqlWarehousesResult - secret, err := ctx.InvokePackageRaw("databricks:index/getSqlWarehouses:getSqlWarehouses", args, &rv, "", opts...) 
- if err != nil { - return GetSqlWarehousesResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetSqlWarehousesResultOutput) - if secret { - return pulumi.ToSecret(output).(GetSqlWarehousesResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getSqlWarehouses:getSqlWarehouses", args, GetSqlWarehousesResultOutput{}, options).(GetSqlWarehousesResultOutput), nil }).(GetSqlWarehousesResultOutput) } diff --git a/sdk/go/databricks/getStorageCredential.go b/sdk/go/databricks/getStorageCredential.go index 8f922cea5..835a974e5 100644 --- a/sdk/go/databricks/getStorageCredential.go +++ b/sdk/go/databricks/getStorageCredential.go @@ -80,21 +80,11 @@ type LookupStorageCredentialResult struct { } func LookupStorageCredentialOutput(ctx *pulumi.Context, args LookupStorageCredentialOutputArgs, opts ...pulumi.InvokeOption) LookupStorageCredentialResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (LookupStorageCredentialResultOutput, error) { args := v.(LookupStorageCredentialArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv LookupStorageCredentialResult - secret, err := ctx.InvokePackageRaw("databricks:index/getStorageCredential:getStorageCredential", args, &rv, "", opts...) 
- if err != nil { - return LookupStorageCredentialResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(LookupStorageCredentialResultOutput) - if secret { - return pulumi.ToSecret(output).(LookupStorageCredentialResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getStorageCredential:getStorageCredential", args, LookupStorageCredentialResultOutput{}, options).(LookupStorageCredentialResultOutput), nil }).(LookupStorageCredentialResultOutput) } diff --git a/sdk/go/databricks/getStorageCredentials.go b/sdk/go/databricks/getStorageCredentials.go index 04bc2c571..71c2b731b 100644 --- a/sdk/go/databricks/getStorageCredentials.go +++ b/sdk/go/databricks/getStorageCredentials.go @@ -73,21 +73,11 @@ type GetStorageCredentialsResult struct { } func GetStorageCredentialsOutput(ctx *pulumi.Context, args GetStorageCredentialsOutputArgs, opts ...pulumi.InvokeOption) GetStorageCredentialsResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetStorageCredentialsResultOutput, error) { args := v.(GetStorageCredentialsArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetStorageCredentialsResult - secret, err := ctx.InvokePackageRaw("databricks:index/getStorageCredentials:getStorageCredentials", args, &rv, "", opts...) 
- if err != nil { - return GetStorageCredentialsResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetStorageCredentialsResultOutput) - if secret { - return pulumi.ToSecret(output).(GetStorageCredentialsResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getStorageCredentials:getStorageCredentials", args, GetStorageCredentialsResultOutput{}, options).(GetStorageCredentialsResultOutput), nil }).(GetStorageCredentialsResultOutput) } diff --git a/sdk/go/databricks/getTable.go b/sdk/go/databricks/getTable.go index e85ad18e7..a7c6a5c5d 100644 --- a/sdk/go/databricks/getTable.go +++ b/sdk/go/databricks/getTable.go @@ -95,21 +95,11 @@ type LookupTableResult struct { } func LookupTableOutput(ctx *pulumi.Context, args LookupTableOutputArgs, opts ...pulumi.InvokeOption) LookupTableResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (LookupTableResultOutput, error) { args := v.(LookupTableArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv LookupTableResult - secret, err := ctx.InvokePackageRaw("databricks:index/getTable:getTable", args, &rv, "", opts...) 
- if err != nil { - return LookupTableResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(LookupTableResultOutput) - if secret { - return pulumi.ToSecret(output).(LookupTableResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getTable:getTable", args, LookupTableResultOutput{}, options).(LookupTableResultOutput), nil }).(LookupTableResultOutput) } diff --git a/sdk/go/databricks/getTables.go b/sdk/go/databricks/getTables.go index d17c3a2b7..29ffa47a6 100644 --- a/sdk/go/databricks/getTables.go +++ b/sdk/go/databricks/getTables.go @@ -102,21 +102,11 @@ type GetTablesResult struct { } func GetTablesOutput(ctx *pulumi.Context, args GetTablesOutputArgs, opts ...pulumi.InvokeOption) GetTablesResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetTablesResultOutput, error) { args := v.(GetTablesArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetTablesResult - secret, err := ctx.InvokePackageRaw("databricks:index/getTables:getTables", args, &rv, "", opts...) 
- if err != nil { - return GetTablesResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetTablesResultOutput) - if secret { - return pulumi.ToSecret(output).(GetTablesResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getTables:getTables", args, GetTablesResultOutput{}, options).(GetTablesResultOutput), nil }).(GetTablesResultOutput) } diff --git a/sdk/go/databricks/getUser.go b/sdk/go/databricks/getUser.go index 5f9425ea7..6666f2fbf 100644 --- a/sdk/go/databricks/getUser.go +++ b/sdk/go/databricks/getUser.go @@ -112,21 +112,11 @@ type LookupUserResult struct { } func LookupUserOutput(ctx *pulumi.Context, args LookupUserOutputArgs, opts ...pulumi.InvokeOption) LookupUserResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (LookupUserResultOutput, error) { args := v.(LookupUserArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv LookupUserResult - secret, err := ctx.InvokePackageRaw("databricks:index/getUser:getUser", args, &rv, "", opts...) 
- if err != nil { - return LookupUserResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(LookupUserResultOutput) - if secret { - return pulumi.ToSecret(output).(LookupUserResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getUser:getUser", args, LookupUserResultOutput{}, options).(LookupUserResultOutput), nil }).(LookupUserResultOutput) } diff --git a/sdk/go/databricks/getViews.go b/sdk/go/databricks/getViews.go index 100d6c012..450828ab9 100644 --- a/sdk/go/databricks/getViews.go +++ b/sdk/go/databricks/getViews.go @@ -100,21 +100,11 @@ type GetViewsResult struct { } func GetViewsOutput(ctx *pulumi.Context, args GetViewsOutputArgs, opts ...pulumi.InvokeOption) GetViewsResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetViewsResultOutput, error) { args := v.(GetViewsArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetViewsResult - secret, err := ctx.InvokePackageRaw("databricks:index/getViews:getViews", args, &rv, "", opts...) 
- if err != nil { - return GetViewsResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetViewsResultOutput) - if secret { - return pulumi.ToSecret(output).(GetViewsResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getViews:getViews", args, GetViewsResultOutput{}, options).(GetViewsResultOutput), nil }).(GetViewsResultOutput) } diff --git a/sdk/go/databricks/getVolume.go b/sdk/go/databricks/getVolume.go index 7a5749b3d..d0defac53 100644 --- a/sdk/go/databricks/getVolume.go +++ b/sdk/go/databricks/getVolume.go @@ -44,21 +44,11 @@ type LookupVolumeResult struct { } func LookupVolumeOutput(ctx *pulumi.Context, args LookupVolumeOutputArgs, opts ...pulumi.InvokeOption) LookupVolumeResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (LookupVolumeResultOutput, error) { args := v.(LookupVolumeArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv LookupVolumeResult - secret, err := ctx.InvokePackageRaw("databricks:index/getVolume:getVolume", args, &rv, "", opts...) 
- if err != nil { - return LookupVolumeResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(LookupVolumeResultOutput) - if secret { - return pulumi.ToSecret(output).(LookupVolumeResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getVolume:getVolume", args, LookupVolumeResultOutput{}, options).(LookupVolumeResultOutput), nil }).(LookupVolumeResultOutput) } diff --git a/sdk/go/databricks/getVolumes.go b/sdk/go/databricks/getVolumes.go index fcdf8e643..2fbb31ad3 100644 --- a/sdk/go/databricks/getVolumes.go +++ b/sdk/go/databricks/getVolumes.go @@ -87,21 +87,11 @@ type GetVolumesResult struct { } func GetVolumesOutput(ctx *pulumi.Context, args GetVolumesOutputArgs, opts ...pulumi.InvokeOption) GetVolumesResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetVolumesResultOutput, error) { args := v.(GetVolumesArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetVolumesResult - secret, err := ctx.InvokePackageRaw("databricks:index/getVolumes:getVolumes", args, &rv, "", opts...) 
- if err != nil { - return GetVolumesResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetVolumesResultOutput) - if secret { - return pulumi.ToSecret(output).(GetVolumesResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getVolumes:getVolumes", args, GetVolumesResultOutput{}, options).(GetVolumesResultOutput), nil }).(GetVolumesResultOutput) } diff --git a/sdk/go/databricks/getZones.go b/sdk/go/databricks/getZones.go index 4de1d928c..c51a970c2 100644 --- a/sdk/go/databricks/getZones.go +++ b/sdk/go/databricks/getZones.go @@ -69,21 +69,11 @@ type GetZonesResult struct { } func GetZonesOutput(ctx *pulumi.Context, args GetZonesOutputArgs, opts ...pulumi.InvokeOption) GetZonesResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetZonesResultOutput, error) { args := v.(GetZonesArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetZonesResult - secret, err := ctx.InvokePackageRaw("databricks:index/getZones:getZones", args, &rv, "", opts...) 
- if err != nil { - return GetZonesResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetZonesResultOutput) - if secret { - return pulumi.ToSecret(output).(GetZonesResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("databricks:index/getZones:getZones", args, GetZonesResultOutput{}, options).(GetZonesResultOutput), nil }).(GetZonesResultOutput) } diff --git a/sdk/java/build.gradle b/sdk/java/build.gradle index da59f18ce..2a0a8aff1 100644 --- a/sdk/java/build.gradle +++ b/sdk/java/build.gradle @@ -44,7 +44,7 @@ repositories { dependencies { implementation("com.google.code.findbugs:jsr305:3.0.2") implementation("com.google.code.gson:gson:2.8.9") - implementation("com.pulumi:pulumi:0.18.0") + implementation("com.pulumi:pulumi:0.20.0") } task sourcesJar(type: Jar) { diff --git a/sdk/java/src/main/java/com/pulumi/databricks/DatabricksFunctions.java b/sdk/java/src/main/java/com/pulumi/databricks/DatabricksFunctions.java index 9b22b9ac6..2f7c3b367 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/DatabricksFunctions.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/DatabricksFunctions.java @@ -171,6 +171,7 @@ import com.pulumi.databricks.outputs.GetZonesResult; import com.pulumi.deployment.Deployment; import com.pulumi.deployment.InvokeOptions; +import com.pulumi.deployment.InvokeOutputOptions; import com.pulumi.resources.InvokeArgs; import java.util.concurrent.CompletableFuture; @@ -439,6 +440,94 @@ public static CompletableFuture getAwsAssumeRolePo public static Output getAwsAssumeRolePolicy(GetAwsAssumeRolePolicyArgs args, InvokeOptions options) { return Deployment.getInstance().invoke("databricks:index/getAwsAssumeRolePolicy:getAwsAssumeRolePolicy", TypeShape.of(GetAwsAssumeRolePolicyResult.class), args, Utilities.withVersion(options)); } + /** + * This data source constructs necessary AWS STS assume role policy for you. 
+ * + * ## Example Usage + * + * End-to-end example of provisioning Cross-account IAM role with databricks.MwsCredentials and aws_iam_role: + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetAwsCrossAccountPolicyArgs;
+     * import com.pulumi.aws.iam.Policy;
+     * import com.pulumi.aws.iam.PolicyArgs;
+     * import com.pulumi.databricks.inputs.GetAwsAssumeRolePolicyArgs;
+     * import com.pulumi.aws.iam.Role;
+     * import com.pulumi.aws.iam.RoleArgs;
+     * import com.pulumi.aws.iam.RolePolicyAttachment;
+     * import com.pulumi.aws.iam.RolePolicyAttachmentArgs;
+     * import com.pulumi.databricks.MwsCredentials;
+     * import com.pulumi.databricks.MwsCredentialsArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var config = ctx.config();
+     *         final var databricksAccountId = config.get("databricksAccountId");
+     *         final var this_ = DatabricksFunctions.getAwsCrossAccountPolicy();
+     * 
+     *         var crossAccountPolicy = new Policy("crossAccountPolicy", PolicyArgs.builder()
+     *             .name(String.format("%s-crossaccount-iam-policy", prefix))
+     *             .policy(this_.json())
+     *             .build());
+     * 
+     *         final var thisGetAwsAssumeRolePolicy = DatabricksFunctions.getAwsAssumeRolePolicy(GetAwsAssumeRolePolicyArgs.builder()
+     *             .externalId(databricksAccountId)
+     *             .build());
+     * 
+     *         var crossAccount = new Role("crossAccount", RoleArgs.builder()
+     *             .name(String.format("%s-crossaccount-iam-role", prefix))
+     *             .assumeRolePolicy(thisGetAwsAssumeRolePolicy.applyValue(getAwsAssumeRolePolicyResult -> getAwsAssumeRolePolicyResult.json()))
+     *             .description("Grants Databricks full access to VPC resources")
+     *             .build());
+     * 
+     *         var crossAccountRolePolicyAttachment = new RolePolicyAttachment("crossAccountRolePolicyAttachment", RolePolicyAttachmentArgs.builder()
+     *             .policyArn(crossAccountPolicy.arn())
+     *             .role(crossAccount.name())
+     *             .build());
+     * 
+     *         // required only in case of multi-workspace setup
+     *         var thisMwsCredentials = new MwsCredentials("thisMwsCredentials", MwsCredentialsArgs.builder()
+     *             .accountId(databricksAccountId)
+     *             .credentialsName(String.format("%s-creds", prefix))
+     *             .roleArn(crossAccount.arn())
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide + * * databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. + * * databricks.getAwsCrossAccountPolicy data to construct the necessary AWS cross-account policy for you, which is based on [official documentation](https://docs.databricks.com/administration-guide/account-api/iam-role.html#language-Your%C2%A0VPC,%C2%A0default). + * + */ + public static Output getAwsAssumeRolePolicy(GetAwsAssumeRolePolicyArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getAwsAssumeRolePolicy:getAwsAssumeRolePolicy", TypeShape.of(GetAwsAssumeRolePolicyResult.class), args, Utilities.withVersion(options)); + } /** * This data source constructs necessary AWS STS assume role policy for you. * @@ -701,6 +790,64 @@ public static CompletableFuture getAwsBucketPolicyPlai public static Output getAwsBucketPolicy(GetAwsBucketPolicyArgs args, InvokeOptions options) { return Deployment.getInstance().invoke("databricks:index/getAwsBucketPolicy:getAwsBucketPolicy", TypeShape.of(GetAwsBucketPolicyResult.class), args, Utilities.withVersion(options)); } + /** + * This datasource configures a simple access policy for AWS S3 buckets, so that Databricks can access data in it. + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.aws.s3.BucketV2;
+     * import com.pulumi.aws.s3.BucketV2Args;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetAwsBucketPolicyArgs;
+     * import com.pulumi.aws.s3.BucketPolicy;
+     * import com.pulumi.aws.s3.BucketPolicyArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         var thisBucketV2 = new BucketV2("thisBucketV2", BucketV2Args.builder()
+     *             .bucket("")
+     *             .forceDestroy(true)
+     *             .build());
+     * 
+     *         final var this_ = DatabricksFunctions.getAwsBucketPolicy(GetAwsBucketPolicyArgs.builder()
+     *             .bucket(thisBucketV2.bucket())
+     *             .build());
+     * 
+     *         var thisBucketPolicy = new BucketPolicy("thisBucketPolicy", BucketPolicyArgs.builder()
+     *             .bucket(thisBucketV2.id())
+     *             .policy(this_.applyValue(this_ -> this_.json()))
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * Bucket policy with full access: + * + */ + public static Output getAwsBucketPolicy(GetAwsBucketPolicyArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getAwsBucketPolicy:getAwsBucketPolicy", TypeShape.of(GetAwsBucketPolicyResult.class), args, Utilities.withVersion(options)); + } /** * This datasource configures a simple access policy for AWS S3 buckets, so that Databricks can access data in it. * @@ -1024,6 +1171,59 @@ public static CompletableFuture getAwsCrossAccou public static Output getAwsCrossAccountPolicy(GetAwsCrossAccountPolicyArgs args, InvokeOptions options) { return Deployment.getInstance().invoke("databricks:index/getAwsCrossAccountPolicy:getAwsCrossAccountPolicy", TypeShape.of(GetAwsCrossAccountPolicyResult.class), args, Utilities.withVersion(options)); } + /** + * > **Note** This data source can only be used with an account-level provider! + * + * This data source constructs necessary AWS cross-account policy for you, which is based on [official documentation](https://docs.databricks.com/administration-guide/account-api/iam-role.html#language-Your%C2%A0VPC,%C2%A0default). + * + * ## Example Usage + * + * For more detailed usage please see databricks.getAwsAssumeRolePolicy or databricks_aws_s3_mount pages. + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetAwsCrossAccountPolicyArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var this_ = DatabricksFunctions.getAwsCrossAccountPolicy();
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide + * * databricks.getAwsAssumeRolePolicy data to construct the necessary AWS STS assume role policy. + * * databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. + * * databricks.InstanceProfile to manage AWS EC2 instance profiles that users can launch databricks.Cluster and access data, like databricks_mount. + * + */ + public static Output getAwsCrossAccountPolicy(GetAwsCrossAccountPolicyArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getAwsCrossAccountPolicy:getAwsCrossAccountPolicy", TypeShape.of(GetAwsCrossAccountPolicyResult.class), args, Utilities.withVersion(options)); + } /** * > **Note** This data source can only be used with an account-level provider! 
* @@ -1350,13 +1550,13 @@ public static Output getAwsUnityCatalo * <!--End PulumiCodeChooser --> * */ - public static CompletableFuture getAwsUnityCatalogAssumeRolePolicyPlain(GetAwsUnityCatalogAssumeRolePolicyPlainArgs args, InvokeOptions options) { - return Deployment.getInstance().invokeAsync("databricks:index/getAwsUnityCatalogAssumeRolePolicy:getAwsUnityCatalogAssumeRolePolicy", TypeShape.of(GetAwsUnityCatalogAssumeRolePolicyResult.class), args, Utilities.withVersion(options)); + public static Output getAwsUnityCatalogAssumeRolePolicy(GetAwsUnityCatalogAssumeRolePolicyArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getAwsUnityCatalogAssumeRolePolicy:getAwsUnityCatalogAssumeRolePolicy", TypeShape.of(GetAwsUnityCatalogAssumeRolePolicyResult.class), args, Utilities.withVersion(options)); } /** * > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. * - * This data source constructs the necessary AWS Unity Catalog policy for you. + * This data source constructs the necessary AWS Unity Catalog assume role policy for you. 
* * ## Example Usage * @@ -1419,8 +1619,8 @@ public static CompletableFuture getAws * <!--End PulumiCodeChooser --> * */ - public static Output getAwsUnityCatalogPolicy(GetAwsUnityCatalogPolicyArgs args) { - return getAwsUnityCatalogPolicy(args, InvokeOptions.Empty); + public static CompletableFuture getAwsUnityCatalogAssumeRolePolicyPlain(GetAwsUnityCatalogAssumeRolePolicyPlainArgs args, InvokeOptions options) { + return Deployment.getInstance().invokeAsync("databricks:index/getAwsUnityCatalogAssumeRolePolicy:getAwsUnityCatalogAssumeRolePolicy", TypeShape.of(GetAwsUnityCatalogAssumeRolePolicyResult.class), args, Utilities.withVersion(options)); } /** * > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. @@ -1488,8 +1688,8 @@ public static Output getAwsUnityCatalogPolicy(Ge * <!--End PulumiCodeChooser --> * */ - public static CompletableFuture getAwsUnityCatalogPolicyPlain(GetAwsUnityCatalogPolicyPlainArgs args) { - return getAwsUnityCatalogPolicyPlain(args, InvokeOptions.Empty); + public static Output getAwsUnityCatalogPolicy(GetAwsUnityCatalogPolicyArgs args) { + return getAwsUnityCatalogPolicy(args, InvokeOptions.Empty); } /** * > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. 
@@ -1557,8 +1757,8 @@ public static CompletableFuture getAwsUnityCatal * <!--End PulumiCodeChooser --> * */ - public static Output getAwsUnityCatalogPolicy(GetAwsUnityCatalogPolicyArgs args, InvokeOptions options) { - return Deployment.getInstance().invoke("databricks:index/getAwsUnityCatalogPolicy:getAwsUnityCatalogPolicy", TypeShape.of(GetAwsUnityCatalogPolicyResult.class), args, Utilities.withVersion(options)); + public static CompletableFuture getAwsUnityCatalogPolicyPlain(GetAwsUnityCatalogPolicyPlainArgs args) { + return getAwsUnityCatalogPolicyPlain(args, InvokeOptions.Empty); } /** * > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. @@ -1626,20 +1826,16 @@ public static Output getAwsUnityCatalogPolicy(Ge * <!--End PulumiCodeChooser --> * */ - public static CompletableFuture getAwsUnityCatalogPolicyPlain(GetAwsUnityCatalogPolicyPlainArgs args, InvokeOptions options) { - return Deployment.getInstance().invokeAsync("databricks:index/getAwsUnityCatalogPolicy:getAwsUnityCatalogPolicy", TypeShape.of(GetAwsUnityCatalogPolicyResult.class), args, Utilities.withVersion(options)); + public static Output getAwsUnityCatalogPolicy(GetAwsUnityCatalogPolicyArgs args, InvokeOptions options) { + return Deployment.getInstance().invoke("databricks:index/getAwsUnityCatalogPolicy:getAwsUnityCatalogPolicy", TypeShape.of(GetAwsUnityCatalogPolicyResult.class), args, Utilities.withVersion(options)); } /** - * > **Note** This data source can only be used with a workspace-level provider! 
- * - * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + * > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. * - * Retrieves details of a specific catalog in Unity Catalog, that were created by Pulumi or manually. Use databricks.getCatalogs to retrieve IDs of multiple catalogs from Unity Catalog + * This data source constructs the necessary AWS Unity Catalog policy for you. * * ## Example Usage * - * Read on a specific catalog `test`: - * * <!--Start PulumiCodeChooser --> *
      * {@code
@@ -1649,10 +1845,12 @@ public static CompletableFuture getAwsUnityCatal
      * import com.pulumi.Pulumi;
      * import com.pulumi.core.Output;
      * import com.pulumi.databricks.DatabricksFunctions;
-     * import com.pulumi.databricks.inputs.GetCatalogArgs;
-     * import com.pulumi.databricks.Grants;
-     * import com.pulumi.databricks.GrantsArgs;
-     * import com.pulumi.databricks.inputs.GrantsGrantArgs;
+     * import com.pulumi.databricks.inputs.GetAwsUnityCatalogPolicyArgs;
+     * import com.pulumi.databricks.inputs.GetAwsUnityCatalogAssumeRolePolicyArgs;
+     * import com.pulumi.aws.iam.Policy;
+     * import com.pulumi.aws.iam.PolicyArgs;
+     * import com.pulumi.aws.iam.Role;
+     * import com.pulumi.aws.iam.RoleArgs;
      * import java.util.List;
      * import java.util.ArrayList;
      * import java.util.Map;
@@ -1666,22 +1864,162 @@ public static CompletableFuture getAwsUnityCatal
      *     }
      * 
      *     public static void stack(Context ctx) {
-     *         final var test = DatabricksFunctions.getCatalog(GetCatalogArgs.builder()
-     *             .name("test")
+     *         final var this_ = DatabricksFunctions.getAwsUnityCatalogPolicy(GetAwsUnityCatalogPolicyArgs.builder()
+     *             .awsAccountId(awsAccountId)
+     *             .bucketName("databricks-bucket")
+     *             .roleName(String.format("%s-uc-access", prefix))
+     *             .kmsName("arn:aws:kms:us-west-2:111122223333:key/databricks-kms")
      *             .build());
      * 
-     *         var things = new Grants("things", GrantsArgs.builder()
-     *             .catalog(test.applyValue(getCatalogResult -> getCatalogResult.name()))
-     *             .grants(GrantsGrantArgs.builder()
-     *                 .principal("sensitive")
-     *                 .privileges("USE_CATALOG")
-     *                 .build())
+     *         final var thisGetAwsUnityCatalogAssumeRolePolicy = DatabricksFunctions.getAwsUnityCatalogAssumeRolePolicy(GetAwsUnityCatalogAssumeRolePolicyArgs.builder()
+     *             .awsAccountId(awsAccountId)
+     *             .roleName(String.format("%s-uc-access", prefix))
+     *             .externalId("12345")
      *             .build());
      * 
-     *     }
-     * }
-     * }
-     * 
+ * var unityMetastore = new Policy("unityMetastore", PolicyArgs.builder() + * .name(String.format("%s-unity-catalog-metastore-access-iam-policy", prefix)) + * .policy(this_.json()) + * .build()); + * + * var metastoreDataAccess = new Role("metastoreDataAccess", RoleArgs.builder() + * .name(String.format("%s-uc-access", prefix)) + * .assumeRolePolicy(thisGetAwsUnityCatalogAssumeRolePolicy.applyValue(getAwsUnityCatalogAssumeRolePolicyResult -> getAwsUnityCatalogAssumeRolePolicyResult.json())) + * .managedPolicyArns(unityMetastore.arn()) + * .build()); + * + * } + * } + * } + * + * <!--End PulumiCodeChooser --> + * + */ + public static Output getAwsUnityCatalogPolicy(GetAwsUnityCatalogPolicyArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getAwsUnityCatalogPolicy:getAwsUnityCatalogPolicy", TypeShape.of(GetAwsUnityCatalogPolicyResult.class), args, Utilities.withVersion(options)); + } + /** + * > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. + * + * This data source constructs the necessary AWS Unity Catalog policy for you. + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetAwsUnityCatalogPolicyArgs;
+     * import com.pulumi.databricks.inputs.GetAwsUnityCatalogAssumeRolePolicyArgs;
+     * import com.pulumi.aws.iam.Policy;
+     * import com.pulumi.aws.iam.PolicyArgs;
+     * import com.pulumi.aws.iam.Role;
+     * import com.pulumi.aws.iam.RoleArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var this_ = DatabricksFunctions.getAwsUnityCatalogPolicy(GetAwsUnityCatalogPolicyArgs.builder()
+     *             .awsAccountId(awsAccountId)
+     *             .bucketName("databricks-bucket")
+     *             .roleName(String.format("%s-uc-access", prefix))
+     *             .kmsName("arn:aws:kms:us-west-2:111122223333:key/databricks-kms")
+     *             .build());
+     * 
+     *         final var thisGetAwsUnityCatalogAssumeRolePolicy = DatabricksFunctions.getAwsUnityCatalogAssumeRolePolicy(GetAwsUnityCatalogAssumeRolePolicyArgs.builder()
+     *             .awsAccountId(awsAccountId)
+     *             .roleName(String.format("%s-uc-access", prefix))
+     *             .externalId("12345")
+     *             .build());
+     * 
+     *         var unityMetastore = new Policy("unityMetastore", PolicyArgs.builder()
+     *             .name(String.format("%s-unity-catalog-metastore-access-iam-policy", prefix))
+     *             .policy(this_.json())
+     *             .build());
+     * 
+     *         var metastoreDataAccess = new Role("metastoreDataAccess", RoleArgs.builder()
+     *             .name(String.format("%s-uc-access", prefix))
+     *             .assumeRolePolicy(thisGetAwsUnityCatalogAssumeRolePolicy.applyValue(getAwsUnityCatalogAssumeRolePolicyResult -> getAwsUnityCatalogAssumeRolePolicyResult.json()))
+     *             .managedPolicyArns(unityMetastore.arn())
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + */ + public static CompletableFuture getAwsUnityCatalogPolicyPlain(GetAwsUnityCatalogPolicyPlainArgs args, InvokeOptions options) { + return Deployment.getInstance().invokeAsync("databricks:index/getAwsUnityCatalogPolicy:getAwsUnityCatalogPolicy", TypeShape.of(GetAwsUnityCatalogPolicyResult.class), args, Utilities.withVersion(options)); + } + /** + * > **Note** This data source can only be used with a workspace-level provider! + * + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + * + * Retrieves details of a specific catalog in Unity Catalog, that were created by Pulumi or manually. Use databricks.getCatalogs to retrieve IDs of multiple catalogs from Unity Catalog + * + * ## Example Usage + * + * Read on a specific catalog `test`: + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetCatalogArgs;
+     * import com.pulumi.databricks.Grants;
+     * import com.pulumi.databricks.GrantsArgs;
+     * import com.pulumi.databricks.inputs.GrantsGrantArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var test = DatabricksFunctions.getCatalog(GetCatalogArgs.builder()
+     *             .name("test")
+     *             .build());
+     * 
+     *         var things = new Grants("things", GrantsArgs.builder()
+     *             .catalog(test.applyValue(getCatalogResult -> getCatalogResult.name()))
+     *             .grants(GrantsGrantArgs.builder()
+     *                 .principal("sensitive")
+     *                 .privileges("USE_CATALOG")
+     *                 .build())
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
* <!--End PulumiCodeChooser --> * * ## Related Resources @@ -1827,6 +2165,72 @@ public static CompletableFuture getCatalogPlain(GetCatalogPlai public static Output getCatalog(GetCatalogArgs args, InvokeOptions options) { return Deployment.getInstance().invoke("databricks:index/getCatalog:getCatalog", TypeShape.of(GetCatalogResult.class), args, Utilities.withVersion(options)); } + /** + * > **Note** This data source can only be used with a workspace-level provider! + * + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + * + * Retrieves details of a specific catalog in Unity Catalog, that were created by Pulumi or manually. Use databricks.getCatalogs to retrieve IDs of multiple catalogs from Unity Catalog + * + * ## Example Usage + * + * Read on a specific catalog `test`: + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetCatalogArgs;
+     * import com.pulumi.databricks.Grants;
+     * import com.pulumi.databricks.GrantsArgs;
+     * import com.pulumi.databricks.inputs.GrantsGrantArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var test = DatabricksFunctions.getCatalog(GetCatalogArgs.builder()
+     *             .name("test")
+     *             .build());
+     * 
+     *         var things = new Grants("things", GrantsArgs.builder()
+     *             .catalog(test.applyValue(getCatalogResult -> getCatalogResult.name()))
+     *             .grants(GrantsGrantArgs.builder()
+     *                 .principal("sensitive")
+     *                 .privileges("USE_CATALOG")
+     *                 .build())
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.Grant to manage grants within Unity Catalog. + * * databricks.getCatalogs to list all catalogs within Unity Catalog metastore. + * + */ + public static Output getCatalog(GetCatalogArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getCatalog:getCatalog", TypeShape.of(GetCatalogResult.class), args, Utilities.withVersion(options)); + } /** * > **Note** This data source can only be used with a workspace-level provider! * @@ -2163,6 +2567,60 @@ public static CompletableFuture getCatalogsPlain(GetCatalogsP public static Output getCatalogs(GetCatalogsArgs args, InvokeOptions options) { return Deployment.getInstance().invoke("databricks:index/getCatalogs:getCatalogs", TypeShape.of(GetCatalogsResult.class), args, Utilities.withVersion(options)); } + /** + * > **Note** This data source can only be used with a workspace-level provider! + * + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + * + * Retrieves a list of databricks.Catalog ids, that were created by Pulumi or manually, so that special handling could be applied. + * + * ## Example Usage + * + * Listing all catalogs: + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetCatalogsArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var all = DatabricksFunctions.getCatalogs();
+     * 
+     *         ctx.export("allCatalogs", all.applyValue(getCatalogsResult -> getCatalogsResult));
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.Schema to manage schemas within Unity Catalog. + * * databricks.Catalog to manage catalogs within Unity Catalog. + * + */ + public static Output getCatalogs(GetCatalogsArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getCatalogs:getCatalogs", TypeShape.of(GetCatalogsResult.class), args, Utilities.withVersion(options)); + } /** * > **Note** This data source can only be used with a workspace-level provider! * @@ -2357,6 +2815,34 @@ public static CompletableFuture getClusterPlain(GetClusterPlai public static Output getCluster(GetClusterArgs args, InvokeOptions options) { return Deployment.getInstance().invoke("databricks:index/getCluster:getCluster", TypeShape.of(GetClusterResult.class), args, Utilities.withVersion(options)); } + /** + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + * + * Retrieves information about a databricks.Cluster using its id. This could be retrieved programmatically using databricks.getClusters data source. + * + * ## Example Usage + * + * Retrieve attributes of each SQL warehouses in a workspace + * + * <!--Start PulumiCodeChooser --> + * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are often used in the same context: + * + * * End to end workspace management guide. + * * databricks.Cluster to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html). + * * databricks.ClusterPolicy to create a databricks.Cluster policy, which limits the ability to create clusters based on a set of rules. 
+ * * databricks.InstancePool to manage [instance pools](https://docs.databricks.com/clusters/instance-pools/index.html) to reduce cluster start and auto-scaling times by maintaining a set of idle, ready-to-use instances. + * * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster. + * * databricks.Library to install a [library](https://docs.databricks.com/libraries/index.html) on databricks_cluster. + * * databricks.Pipeline to deploy [Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html). + * + */ + public static Output getCluster(GetClusterArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getCluster:getCluster", TypeShape.of(GetClusterResult.class), args, Utilities.withVersion(options)); + } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. * @@ -2645,6 +3131,58 @@ public static CompletableFuture getClusterPolicyPlain(Ge public static Output getClusterPolicy(GetClusterPolicyArgs args, InvokeOptions options) { return Deployment.getInstance().invoke("databricks:index/getClusterPolicy:getClusterPolicy", TypeShape.of(GetClusterPolicyResult.class), args, Utilities.withVersion(options)); } + /** + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + * + * Retrieves information about databricks_cluster_policy. + * + * ## Example Usage + * + * Referring to a cluster policy by name: + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetClusterPolicyArgs;
+     * import com.pulumi.databricks.Cluster;
+     * import com.pulumi.databricks.ClusterArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var personal = DatabricksFunctions.getClusterPolicy(GetClusterPolicyArgs.builder()
+     *             .name("Personal Compute")
+     *             .build());
+     * 
+     *         var myCluster = new Cluster("myCluster", ClusterArgs.builder()
+     *             .policyId(personal.applyValue(getClusterPolicyResult -> getClusterPolicyResult.id()))
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + */ + public static Output getClusterPolicy(GetClusterPolicyArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getClusterPolicy:getClusterPolicy", TypeShape.of(GetClusterPolicyResult.class), args, Utilities.withVersion(options)); + } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. * @@ -3240,8 +3778,132 @@ public static Output getClusters(GetClustersArgs args, Invoke * * databricks.Pipeline to deploy [Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html). * */ - public static CompletableFuture getClustersPlain(GetClustersPlainArgs args, InvokeOptions options) { - return Deployment.getInstance().invokeAsync("databricks:index/getClusters:getClusters", TypeShape.of(GetClustersResult.class), args, Utilities.withVersion(options)); + public static Output getClusters(GetClustersArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getClusters:getClusters", TypeShape.of(GetClustersResult.class), args, Utilities.withVersion(options)); + } + /** + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + * + * Retrieves a list of databricks.Cluster ids, that were created by Pulumi or manually, with or without databricks_cluster_policy. + * + * ## Example Usage + * + * Retrieve cluster IDs for all clusters: + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetClustersArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var all = DatabricksFunctions.getClusters();
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * Retrieve cluster IDs for all clusters having "Shared" in the cluster name: + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetClustersArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var allShared = DatabricksFunctions.getClusters(GetClustersArgs.builder()
+     *             .clusterNameContains("shared")
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * End to end workspace management guide. + * * databricks.Cluster to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html). + * * databricks.ClusterPolicy to create a databricks.Cluster policy, which limits the ability to create clusters based on a set of rules. + * * databricks.InstancePool to manage [instance pools](https://docs.databricks.com/clusters/instance-pools/index.html) to reduce cluster start and auto-scaling times by maintaining a set of idle, ready-to-use instances. + * * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster. + * * databricks.Library to install a [library](https://docs.databricks.com/libraries/index.html) on databricks_cluster. + * * databricks.Pipeline to deploy [Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html). + * + */ + public static CompletableFuture getClustersPlain(GetClustersPlainArgs args, InvokeOptions options) { + return Deployment.getInstance().invokeAsync("databricks:index/getClusters:getClusters", TypeShape.of(GetClustersResult.class), args, Utilities.withVersion(options)); + } + /** + * Retrieves information about the currently configured provider to make a decision, for example, add a dynamic block based on the specific cloud. 
+ * + * ## Example Usage + * + * Create cloud-specific databricks_storage_credential: + * + * <!--Start PulumiCodeChooser --> + * <!--End PulumiCodeChooser --> + * + * ## Exported attributes + * + * Data source exposes the following attributes: + * + * * `is_account` - Whether the provider is configured at account-level + * * `account_id` - Account Id if provider is configured at account-level + * * `host` - Host of the Databricks workspace or account console + * * `cloud_type` - Cloud type specified in the provider + * * `auth_type` - Auth type used by the provider + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * End to end workspace management guide + * * databricks.Directory to manage directories in [Databricks Workpace](https://docs.databricks.com/workspace/workspace-objects.html). + * * databricks.Notebook to manage [Databricks Notebooks](https://docs.databricks.com/notebooks/index.html). + * * databricks.Repo to manage [Databricks Repos](https://docs.databricks.com/repos.html). + * + */ + public static Output getCurrentConfig() { + return getCurrentConfig(GetCurrentConfigArgs.Empty, InvokeOptions.Empty); } /** * Retrieves information about the currently configured provider to make a decision, for example, add a dynamic block based on the specific cloud. @@ -3273,8 +3935,8 @@ public static CompletableFuture getClustersPlain(GetClustersP * * databricks.Repo to manage [Databricks Repos](https://docs.databricks.com/repos.html). * */ - public static Output getCurrentConfig() { - return getCurrentConfig(GetCurrentConfigArgs.Empty, InvokeOptions.Empty); + public static CompletableFuture getCurrentConfigPlain() { + return getCurrentConfigPlain(GetCurrentConfigPlainArgs.Empty, InvokeOptions.Empty); } /** * Retrieves information about the currently configured provider to make a decision, for example, add a dynamic block based on the specific cloud. 
@@ -3306,8 +3968,8 @@ public static Output getCurrentConfig() { * * databricks.Repo to manage [Databricks Repos](https://docs.databricks.com/repos.html). * */ - public static CompletableFuture getCurrentConfigPlain() { - return getCurrentConfigPlain(GetCurrentConfigPlainArgs.Empty, InvokeOptions.Empty); + public static Output getCurrentConfig(GetCurrentConfigArgs args) { + return getCurrentConfig(args, InvokeOptions.Empty); } /** * Retrieves information about the currently configured provider to make a decision, for example, add a dynamic block based on the specific cloud. @@ -3339,8 +4001,8 @@ public static CompletableFuture getCurrentConfigPlain() * * databricks.Repo to manage [Databricks Repos](https://docs.databricks.com/repos.html). * */ - public static Output getCurrentConfig(GetCurrentConfigArgs args) { - return getCurrentConfig(args, InvokeOptions.Empty); + public static CompletableFuture getCurrentConfigPlain(GetCurrentConfigPlainArgs args) { + return getCurrentConfigPlain(args, InvokeOptions.Empty); } /** * Retrieves information about the currently configured provider to make a decision, for example, add a dynamic block based on the specific cloud. @@ -3372,8 +4034,8 @@ public static Output getCurrentConfig(GetCurrentConfigAr * * databricks.Repo to manage [Databricks Repos](https://docs.databricks.com/repos.html). * */ - public static CompletableFuture getCurrentConfigPlain(GetCurrentConfigPlainArgs args) { - return getCurrentConfigPlain(args, InvokeOptions.Empty); + public static Output getCurrentConfig(GetCurrentConfigArgs args, InvokeOptions options) { + return Deployment.getInstance().invoke("databricks:index/getCurrentConfig:getCurrentConfig", TypeShape.of(GetCurrentConfigResult.class), args, Utilities.withVersion(options)); } /** * Retrieves information about the currently configured provider to make a decision, for example, add a dynamic block based on the specific cloud. 
@@ -3405,7 +4067,7 @@ public static CompletableFuture getCurrentConfigPlain(Ge * * databricks.Repo to manage [Databricks Repos](https://docs.databricks.com/repos.html). * */ - public static Output getCurrentConfig(GetCurrentConfigArgs args, InvokeOptions options) { + public static Output getCurrentConfig(GetCurrentConfigArgs args, InvokeOutputOptions options) { return Deployment.getInstance().invoke("databricks:index/getCurrentConfig:getCurrentConfig", TypeShape.of(GetCurrentConfigResult.class), args, Utilities.withVersion(options)); } /** @@ -3721,6 +4383,62 @@ public static CompletableFuture getCurrentMetastorePl public static Output getCurrentMetastore(GetCurrentMetastoreArgs args, InvokeOptions options) { return Deployment.getInstance().invoke("databricks:index/getCurrentMetastore:getCurrentMetastore", TypeShape.of(GetCurrentMetastoreResult.class), args, Utilities.withVersion(options)); } + /** + * Retrieves information about metastore attached to a given workspace. + * + * > **Note** This is the workspace-level data source. + * + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute to prevent _authentication is not configured for provider_ errors. + * + * ## Example Usage + * + * MetastoreSummary response for a metastore attached to the current workspace. + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetCurrentMetastoreArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var this_ = DatabricksFunctions.getCurrentMetastore();
+     * 
+     *         ctx.export("someMetastore", this_.metastoreInfo());
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.Metastore to get information for a metastore with a given ID. + * * databricks.getMetastores to get a mapping of name to id of all metastores. + * * databricks.Metastore to manage Metastores within Unity Catalog. + * * databricks.Catalog to manage catalogs within Unity Catalog. + * + */ + public static Output getCurrentMetastore(GetCurrentMetastoreArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getCurrentMetastore:getCurrentMetastore", TypeShape.of(GetCurrentMetastoreResult.class), args, Utilities.withVersion(options)); + } /** * Retrieves information about metastore attached to a given workspace. * @@ -3822,6 +4540,15 @@ public static CompletableFuture getCurrentUserPlain(Invoke public static Output getCurrentUser(InvokeArgs args, InvokeOptions options) { return Deployment.getInstance().invoke("databricks:index/getCurrentUser:getCurrentUser", TypeShape.of(GetCurrentUserResult.class), args, Utilities.withVersion(options)); } + /** + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + * + * Retrieves information about databricks.User or databricks_service_principal, that is calling Databricks REST API. Might be useful in applying the same Pulumi by different users in the shared workspace for testing purposes. 
+ * + */ + public static Output getCurrentUser(InvokeArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getCurrentUser:getCurrentUser", TypeShape.of(GetCurrentUserResult.class), args, Utilities.withVersion(options)); + } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. * @@ -3993,6 +4720,60 @@ public static CompletableFuture getDbfsFilePlain(GetDbfsFileP public static Output getDbfsFile(GetDbfsFileArgs args, InvokeOptions options) { return Deployment.getInstance().invoke("databricks:index/getDbfsFile:getDbfsFile", TypeShape.of(GetDbfsFileResult.class), args, Utilities.withVersion(options)); } + /** + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + * + * This data source allows to get file content from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetDbfsFileArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var report = DatabricksFunctions.getDbfsFile(GetDbfsFileArgs.builder()
+     *             .path("dbfs:/reports/some.csv")
+     *             .limitFileSize(true)
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * End to end workspace management guide. + * * databricks.getDbfsFilePaths data to get list of file names from get file content from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). + * * databricks.DbfsFile to manage relatively small files on [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). + * * databricks.Mount to [mount your cloud storage](https://docs.databricks.com/data/databricks-file-system.html#mount-object-storage-to-dbfs) on `dbfs:/mnt/name`. + * + */ + public static Output getDbfsFile(GetDbfsFileArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getDbfsFile:getDbfsFile", TypeShape.of(GetDbfsFileResult.class), args, Utilities.withVersion(options)); + } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. * @@ -4100,8 +4881,120 @@ public static CompletableFuture getDbfsFilePlain(GetDbfsFileP * * databricks.Mount to [mount your cloud storage](https://docs.databricks.com/data/databricks-file-system.html#mount-object-storage-to-dbfs) on `dbfs:/mnt/name`. * */ - public static Output getDbfsFilePaths(GetDbfsFilePathsArgs args) { - return getDbfsFilePaths(args, InvokeOptions.Empty); + public static Output getDbfsFilePaths(GetDbfsFilePathsArgs args) { + return getDbfsFilePaths(args, InvokeOptions.Empty); + } + /** + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. 
+ * + * This data source allows to get list of file names from get file content from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetDbfsFilePathsArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var partitions = DatabricksFunctions.getDbfsFilePaths(GetDbfsFilePathsArgs.builder()
+     *             .path("dbfs:/user/hive/default.db/table")
+     *             .recursive(false)
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * End to end workspace management guide. + * * databricks.DbfsFile data to get file content from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). + * * databricks.getDbfsFilePaths data to get list of file names from get file content from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). + * * databricks.DbfsFile to manage relatively small files on [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). + * * databricks.Library to install a [library](https://docs.databricks.com/libraries/index.html) on databricks_cluster. + * * databricks.Mount to [mount your cloud storage](https://docs.databricks.com/data/databricks-file-system.html#mount-object-storage-to-dbfs) on `dbfs:/mnt/name`. + * + */ + public static CompletableFuture getDbfsFilePathsPlain(GetDbfsFilePathsPlainArgs args) { + return getDbfsFilePathsPlain(args, InvokeOptions.Empty); + } + /** + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + * + * This data source allows to get list of file names from get file content from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetDbfsFilePathsArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var partitions = DatabricksFunctions.getDbfsFilePaths(GetDbfsFilePathsArgs.builder()
+     *             .path("dbfs:/user/hive/default.db/table")
+     *             .recursive(false)
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * End to end workspace management guide. + * * databricks.DbfsFile data to get file content from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). + * * databricks.getDbfsFilePaths data to get list of file names from get file content from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). + * * databricks.DbfsFile to manage relatively small files on [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). + * * databricks.Library to install a [library](https://docs.databricks.com/libraries/index.html) on databricks_cluster. + * * databricks.Mount to [mount your cloud storage](https://docs.databricks.com/data/databricks-file-system.html#mount-object-storage-to-dbfs) on `dbfs:/mnt/name`. + * + */ + public static Output getDbfsFilePaths(GetDbfsFilePathsArgs args, InvokeOptions options) { + return Deployment.getInstance().invoke("databricks:index/getDbfsFilePaths:getDbfsFilePaths", TypeShape.of(GetDbfsFilePathsResult.class), args, Utilities.withVersion(options)); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -4156,8 +5049,8 @@ public static Output getDbfsFilePaths(GetDbfsFilePathsAr * * databricks.Mount to [mount your cloud storage](https://docs.databricks.com/data/databricks-file-system.html#mount-object-storage-to-dbfs) on `dbfs:/mnt/name`. 
* */ - public static CompletableFuture getDbfsFilePathsPlain(GetDbfsFilePathsPlainArgs args) { - return getDbfsFilePathsPlain(args, InvokeOptions.Empty); + public static Output getDbfsFilePaths(GetDbfsFilePathsArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getDbfsFilePaths:getDbfsFilePaths", TypeShape.of(GetDbfsFilePathsResult.class), args, Utilities.withVersion(options)); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -4212,13 +5105,13 @@ public static CompletableFuture getDbfsFilePathsPlain(Ge * * databricks.Mount to [mount your cloud storage](https://docs.databricks.com/data/databricks-file-system.html#mount-object-storage-to-dbfs) on `dbfs:/mnt/name`. * */ - public static Output getDbfsFilePaths(GetDbfsFilePathsArgs args, InvokeOptions options) { - return Deployment.getInstance().invoke("databricks:index/getDbfsFilePaths:getDbfsFilePaths", TypeShape.of(GetDbfsFilePathsResult.class), args, Utilities.withVersion(options)); + public static CompletableFuture getDbfsFilePathsPlain(GetDbfsFilePathsPlainArgs args, InvokeOptions options) { + return Deployment.getInstance().invokeAsync("databricks:index/getDbfsFilePaths:getDbfsFilePaths", TypeShape.of(GetDbfsFilePathsResult.class), args, Utilities.withVersion(options)); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. * - * This data source allows to get list of file names from get file content from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). 
+ * This data source allows to get information about a directory in a Databricks Workspace. * * ## Example Usage * @@ -4231,7 +5124,7 @@ public static Output getDbfsFilePaths(GetDbfsFilePathsAr * import com.pulumi.Pulumi; * import com.pulumi.core.Output; * import com.pulumi.databricks.DatabricksFunctions; - * import com.pulumi.databricks.inputs.GetDbfsFilePathsArgs; + * import com.pulumi.databricks.inputs.GetDirectoryArgs; * import java.util.List; * import java.util.ArrayList; * import java.util.Map; @@ -4245,9 +5138,8 @@ public static Output getDbfsFilePaths(GetDbfsFilePathsAr * } * * public static void stack(Context ctx) { - * final var partitions = DatabricksFunctions.getDbfsFilePaths(GetDbfsFilePathsArgs.builder() - * .path("dbfs:/user/hive/default.db/table") - * .recursive(false) + * final var prod = DatabricksFunctions.getDirectory(GetDirectoryArgs.builder() + * .path("/Production") * .build()); * * } @@ -4256,20 +5148,9 @@ public static Output getDbfsFilePaths(GetDbfsFilePathsAr * * <!--End PulumiCodeChooser --> * - * ## Related Resources - * - * The following resources are used in the same context: - * - * * End to end workspace management guide. - * * databricks.DbfsFile data to get file content from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). - * * databricks.getDbfsFilePaths data to get list of file names from get file content from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). - * * databricks.DbfsFile to manage relatively small files on [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). - * * databricks.Library to install a [library](https://docs.databricks.com/libraries/index.html) on databricks_cluster. - * * databricks.Mount to [mount your cloud storage](https://docs.databricks.com/data/databricks-file-system.html#mount-object-storage-to-dbfs) on `dbfs:/mnt/name`. 
- * */ - public static CompletableFuture getDbfsFilePathsPlain(GetDbfsFilePathsPlainArgs args, InvokeOptions options) { - return Deployment.getInstance().invokeAsync("databricks:index/getDbfsFilePaths:getDbfsFilePaths", TypeShape.of(GetDbfsFilePathsResult.class), args, Utilities.withVersion(options)); + public static Output getDirectory(GetDirectoryArgs args) { + return getDirectory(args, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -4312,8 +5193,8 @@ public static CompletableFuture getDbfsFilePathsPlain(Ge * <!--End PulumiCodeChooser --> * */ - public static Output getDirectory(GetDirectoryArgs args) { - return getDirectory(args, InvokeOptions.Empty); + public static CompletableFuture getDirectoryPlain(GetDirectoryPlainArgs args) { + return getDirectoryPlain(args, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. 
@@ -4356,8 +5237,8 @@ public static Output getDirectory(GetDirectoryArgs args) { * <!--End PulumiCodeChooser --> * */ - public static CompletableFuture getDirectoryPlain(GetDirectoryPlainArgs args) { - return getDirectoryPlain(args, InvokeOptions.Empty); + public static Output getDirectory(GetDirectoryArgs args, InvokeOptions options) { + return Deployment.getInstance().invoke("databricks:index/getDirectory:getDirectory", TypeShape.of(GetDirectoryResult.class), args, Utilities.withVersion(options)); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -4400,7 +5281,7 @@ public static CompletableFuture getDirectoryPlain(GetDirecto * <!--End PulumiCodeChooser --> * */ - public static Output getDirectory(GetDirectoryArgs args, InvokeOptions options) { + public static Output getDirectory(GetDirectoryArgs args, InvokeOutputOptions options) { return Deployment.getInstance().invoke("databricks:index/getDirectory:getDirectory", TypeShape.of(GetDirectoryResult.class), args, Utilities.withVersion(options)); } /** @@ -4609,6 +5490,60 @@ public static CompletableFuture getExternalLocationPl public static Output getExternalLocation(GetExternalLocationArgs args, InvokeOptions options) { return Deployment.getInstance().invoke("databricks:index/getExternalLocation:getExternalLocation", TypeShape.of(GetExternalLocationResult.class), args, Utilities.withVersion(options)); } + /** + * > **Note** This data source can only be used with a workspace-level provider! + * + * Retrieves details about a databricks.ExternalLocation that were created by Pulumi or manually. + * + * ## Example Usage + * + * Getting details of an existing external location in the metastore + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetExternalLocationArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var this_ = DatabricksFunctions.getExternalLocation(GetExternalLocationArgs.builder()
+     *             .name("this")
+     *             .build());
+     * 
+     *         ctx.export("createdBy", this_.externalLocationInfo().createdBy());
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.getExternalLocations to get names of all external locations + * * databricks.ExternalLocation to manage external locations within Unity Catalog. + * + */ + public static Output getExternalLocation(GetExternalLocationArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getExternalLocation:getExternalLocation", TypeShape.of(GetExternalLocationResult.class), args, Utilities.withVersion(options)); + } /** * > **Note** This data source can only be used with a workspace-level provider! * @@ -4923,6 +5858,58 @@ public static CompletableFuture getExternalLocations public static Output getExternalLocations(GetExternalLocationsArgs args, InvokeOptions options) { return Deployment.getInstance().invoke("databricks:index/getExternalLocations:getExternalLocations", TypeShape.of(GetExternalLocationsResult.class), args, Utilities.withVersion(options)); } + /** + * > **Note** This data source can only be used with a workspace-level provider! + * + * Retrieves a list of databricks.ExternalLocation objects, that were created by Pulumi or manually, so that special handling could be applied. + * + * ## Example Usage + * + * List all external locations in the metastore + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetExternalLocationsArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var all = DatabricksFunctions.getExternalLocations();
+     * 
+     *         ctx.export("allExternalLocations", all.applyValue(getExternalLocationsResult -> getExternalLocationsResult.names()));
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.ExternalLocation to get information about a single external location + * * databricks.ExternalLocation to manage external locations within Unity Catalog. + * + */ + public static Output getExternalLocations(GetExternalLocationsArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getExternalLocations:getExternalLocations", TypeShape.of(GetExternalLocationsResult.class), args, Utilities.withVersion(options)); + } /** * > **Note** This data source can only be used with a workspace-level provider! * @@ -5140,11 +6127,119 @@ public static Output getFunctions(GetFunctionsArgs args, Inv /** * > This data source can only be used with a workspace-level provider! * - * Retrieves a list of [User-Defined Functions (UDFs) registered in the Unity Catalog](https://docs.databricks.com/en/udf/unity-catalog.html). + * Retrieves a list of [User-Defined Functions (UDFs) registered in the Unity Catalog](https://docs.databricks.com/en/udf/unity-catalog.html). + * + * ## Example Usage + * + * List all functions defined in a specific schema (`main.default` in this example): + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetFunctionsArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var all = DatabricksFunctions.getFunctions(GetFunctionsArgs.builder()
+     *             .catalogName("main")
+     *             .schemaName("default")
+     *             .build());
+     * 
+     *         ctx.export("allExternalLocations", all.applyValue(getFunctionsResult -> getFunctionsResult.functions()));
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.Schema to get information about a single schema + * + */ + public static Output getFunctions(GetFunctionsArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getFunctions:getFunctions", TypeShape.of(GetFunctionsResult.class), args, Utilities.withVersion(options)); + } + /** + * > This data source can only be used with a workspace-level provider! + * + * Retrieves a list of [User-Defined Functions (UDFs) registered in the Unity Catalog](https://docs.databricks.com/en/udf/unity-catalog.html). + * + * ## Example Usage + * + * List all functions defined in a specific schema (`main.default` in this example): + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetFunctionsArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var all = DatabricksFunctions.getFunctions(GetFunctionsArgs.builder()
+     *             .catalogName("main")
+     *             .schemaName("default")
+     *             .build());
+     * 
+     *         ctx.export("allExternalLocations", all.applyValue(getFunctionsResult -> getFunctionsResult.functions()));
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.Schema to get information about a single schema + * + */ + public static CompletableFuture getFunctionsPlain(GetFunctionsPlainArgs args, InvokeOptions options) { + return Deployment.getInstance().invokeAsync("databricks:index/getFunctions:getFunctions", TypeShape.of(GetFunctionsResult.class), args, Utilities.withVersion(options)); + } + /** + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + * + * Retrieves information about databricks.Group members, entitlements and instance profiles. * * ## Example Usage * - * List all functions defined in a specific schema (`main.default` in this example): + * Adding user to administrative group * * <!--Start PulumiCodeChooser --> *
@@ -5155,7 +6250,11 @@ public static Output getFunctions(GetFunctionsArgs args, Inv
      * import com.pulumi.Pulumi;
      * import com.pulumi.core.Output;
      * import com.pulumi.databricks.DatabricksFunctions;
-     * import com.pulumi.databricks.inputs.GetFunctionsArgs;
+     * import com.pulumi.databricks.inputs.GetGroupArgs;
+     * import com.pulumi.databricks.User;
+     * import com.pulumi.databricks.UserArgs;
+     * import com.pulumi.databricks.GroupMember;
+     * import com.pulumi.databricks.GroupMemberArgs;
      * import java.util.List;
      * import java.util.ArrayList;
      * import java.util.Map;
@@ -5163,20 +6262,27 @@ public static Output getFunctions(GetFunctionsArgs args, Inv
      * import java.nio.file.Files;
      * import java.nio.file.Paths;
      * 
-     * public class App {
-     *     public static void main(String[] args) {
+     * public class App }{{@code
+     *     public static void main(String[] args) }{{@code
      *         Pulumi.run(App::stack);
-     *     }
+     *     }}{@code
      * 
-     *     public static void stack(Context ctx) {
-     *         final var all = DatabricksFunctions.getFunctions(GetFunctionsArgs.builder()
-     *             .catalogName("main")
-     *             .schemaName("default")
+     *     public static void stack(Context ctx) }{{@code
+     *         final var admins = DatabricksFunctions.getGroup(GetGroupArgs.builder()
+     *             .displayName("admins")
      *             .build());
      * 
-     *         ctx.export("allExternalLocations", all.applyValue(getFunctionsResult -> getFunctionsResult.functions()));
-     *     }
-     * }
+     *         var me = new User("me", UserArgs.builder()
+     *             .userName("me}{@literal @}{@code example.com")
+     *             .build());
+     * 
+     *         var myMemberA = new GroupMember("myMemberA", GroupMemberArgs.builder()
+     *             .groupId(admins.applyValue(getGroupResult -> getGroupResult.id()))
+     *             .memberId(me.id())
+     *             .build());
+     * 
+     *     }}{@code
+     * }}{@code
      * }
      * 
* <!--End PulumiCodeChooser --> @@ -5185,11 +6291,16 @@ public static Output getFunctions(GetFunctionsArgs args, Inv * * The following resources are used in the same context: * - * * databricks.Schema to get information about a single schema + * * End to end workspace management guide + * * databricks.Cluster to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html). + * * databricks.Directory to manage directories in [Databricks Workpace](https://docs.databricks.com/workspace/workspace-objects.html). + * * databricks.GroupMember to attach users and groups as group members. + * * databricks.Permissions to manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspace. + * * databricks.User to [manage users](https://docs.databricks.com/administration-guide/users-groups/users.html), that could be added to databricks.Group within the workspace. * */ - public static CompletableFuture getFunctionsPlain(GetFunctionsPlainArgs args, InvokeOptions options) { - return Deployment.getInstance().invokeAsync("databricks:index/getFunctions:getFunctions", TypeShape.of(GetFunctionsResult.class), args, Utilities.withVersion(options)); + public static Output getGroup(GetGroupArgs args) { + return getGroup(args, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -5258,8 +6369,8 @@ public static CompletableFuture getFunctionsPlain(GetFunctio * * databricks.User to [manage users](https://docs.databricks.com/administration-guide/users-groups/users.html), that could be added to databricks.Group within the workspace. 
* */ - public static Output getGroup(GetGroupArgs args) { - return getGroup(args, InvokeOptions.Empty); + public static CompletableFuture getGroupPlain(GetGroupPlainArgs args) { + return getGroupPlain(args, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -5328,8 +6439,8 @@ public static Output getGroup(GetGroupArgs args) { * * databricks.User to [manage users](https://docs.databricks.com/administration-guide/users-groups/users.html), that could be added to databricks.Group within the workspace. * */ - public static CompletableFuture getGroupPlain(GetGroupPlainArgs args) { - return getGroupPlain(args, InvokeOptions.Empty); + public static Output getGroup(GetGroupArgs args, InvokeOptions options) { + return Deployment.getInstance().invoke("databricks:index/getGroup:getGroup", TypeShape.of(GetGroupResult.class), args, Utilities.withVersion(options)); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -5398,7 +6509,7 @@ public static CompletableFuture getGroupPlain(GetGroupPlainArgs * * databricks.User to [manage users](https://docs.databricks.com/administration-guide/users-groups/users.html), that could be added to databricks.Group within the workspace. 
* */ - public static Output getGroup(GetGroupArgs args, InvokeOptions options) { + public static Output getGroup(GetGroupArgs args, InvokeOutputOptions options) { return Deployment.getInstance().invoke("databricks:index/getGroup:getGroup", TypeShape.of(GetGroupResult.class), args, Utilities.withVersion(options)); } /** @@ -5627,6 +6738,58 @@ public static CompletableFuture getInstancePoolPlain(GetI public static Output getInstancePool(GetInstancePoolArgs args, InvokeOptions options) { return Deployment.getInstance().invoke("databricks:index/getInstancePool:getInstancePool", TypeShape.of(GetInstancePoolResult.class), args, Utilities.withVersion(options)); } + /** + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + * + * Retrieves information about databricks_instance_pool. + * + * ## Example Usage + * + * Referring to an instance pool by name: + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetInstancePoolArgs;
+     * import com.pulumi.databricks.Cluster;
+     * import com.pulumi.databricks.ClusterArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var pool = DatabricksFunctions.getInstancePool(GetInstancePoolArgs.builder()
+     *             .name("All spot")
+     *             .build());
+     * 
+     *         var myCluster = new Cluster("myCluster", ClusterArgs.builder()
+     *             .instancePoolId(pool.applyValue(getInstancePoolResult -> getInstancePoolResult.id()))
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + */ + public static Output getInstancePool(GetInstancePoolArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getInstancePool:getInstancePool", TypeShape.of(GetInstancePoolResult.class), args, Utilities.withVersion(options)); + } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. * @@ -5894,6 +7057,49 @@ public static CompletableFuture getInstanceProfilesPl public static Output getInstanceProfiles(GetInstanceProfilesArgs args, InvokeOptions options) { return Deployment.getInstance().invoke("databricks:index/getInstanceProfiles:getInstanceProfiles", TypeShape.of(GetInstanceProfilesResult.class), args, Utilities.withVersion(options)); } + /** + * Lists all available databricks_instance_profiles. + * + * ## Example Usage + * + * Get all instance profiles: + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetInstanceProfilesArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var all = DatabricksFunctions.getInstanceProfiles();
+     * 
+     *         ctx.export("allInstanceProfiles", all.applyValue(getInstanceProfilesResult -> getInstanceProfilesResult.instanceProfiles()));
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + */ + public static Output getInstanceProfiles(GetInstanceProfilesArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getInstanceProfiles:getInstanceProfiles", TypeShape.of(GetInstanceProfilesResult.class), args, Utilities.withVersion(options)); + } /** * Lists all available databricks_instance_profiles. * @@ -5988,8 +7194,116 @@ public static CompletableFuture getInstanceProfilesPl * * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster. * */ - public static Output getJob() { - return getJob(GetJobArgs.Empty, InvokeOptions.Empty); + public static Output getJob() { + return getJob(GetJobArgs.Empty, InvokeOptions.Empty); + } + /** + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + * + * Retrieves the settings of databricks.Job by name or by id. Complements the feature of the databricks.getJobs data source. + * + * ## Example Usage + * + * Getting the existing cluster id of specific databricks.Job by name or by id: + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetJobArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var this_ = DatabricksFunctions.getJob(GetJobArgs.builder()
+     *             .jobName("My job")
+     *             .build());
+     * 
+     *         ctx.export("jobNumWorkers", this_.jobSettings().settings().newCluster().numWorkers());
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.getJobs data to get all jobs and their names from a workspace. + * * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster. + * + */ + public static CompletableFuture getJobPlain() { + return getJobPlain(GetJobPlainArgs.Empty, InvokeOptions.Empty); + } + /** + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + * + * Retrieves the settings of databricks.Job by name or by id. Complements the feature of the databricks.getJobs data source. + * + * ## Example Usage + * + * Getting the existing cluster id of specific databricks.Job by name or by id: + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetJobArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var this_ = DatabricksFunctions.getJob(GetJobArgs.builder()
+     *             .jobName("My job")
+     *             .build());
+     * 
+     *         ctx.export("jobNumWorkers", this_.jobSettings().settings().newCluster().numWorkers());
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.getJobs data to get all jobs and their names from a workspace. + * * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster. + * + */ + public static Output getJob(GetJobArgs args) { + return getJob(args, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -6042,8 +7356,8 @@ public static Output getJob() { * * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster. * */ - public static CompletableFuture getJobPlain() { - return getJobPlain(GetJobPlainArgs.Empty, InvokeOptions.Empty); + public static CompletableFuture getJobPlain(GetJobPlainArgs args) { + return getJobPlain(args, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -6096,8 +7410,8 @@ public static CompletableFuture getJobPlain() { * * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster. 
* */ - public static Output getJob(GetJobArgs args) { - return getJob(args, InvokeOptions.Empty); + public static Output getJob(GetJobArgs args, InvokeOptions options) { + return Deployment.getInstance().invoke("databricks:index/getJob:getJob", TypeShape.of(GetJobResult.class), args, Utilities.withVersion(options)); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -6150,8 +7464,8 @@ public static Output getJob(GetJobArgs args) { * * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster. * */ - public static CompletableFuture getJobPlain(GetJobPlainArgs args) { - return getJobPlain(args, InvokeOptions.Empty); + public static Output getJob(GetJobArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getJob:getJob", TypeShape.of(GetJobResult.class), args, Utilities.withVersion(options)); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -6204,17 +7518,19 @@ public static CompletableFuture getJobPlain(GetJobPlainArgs args) * * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster. 
* */ - public static Output getJob(GetJobArgs args, InvokeOptions options) { - return Deployment.getInstance().invoke("databricks:index/getJob:getJob", TypeShape.of(GetJobResult.class), args, Utilities.withVersion(options)); + public static CompletableFuture getJobPlain(GetJobPlainArgs args, InvokeOptions options) { + return Deployment.getInstance().invokeAsync("databricks:index/getJob:getJob", TypeShape.of(GetJobResult.class), args, Utilities.withVersion(options)); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. * - * Retrieves the settings of databricks.Job by name or by id. Complements the feature of the databricks.getJobs data source. + * Retrieves a list of databricks.Job ids, that were created by Pulumi or manually, so that special handling could be applied. + * + * > **Note** Data resource will error in case of jobs with duplicate names. * * ## Example Usage * - * Getting the existing cluster id of specific databricks.Job by name or by id: + * Granting view databricks.Permissions to all databricks.Job within the workspace: * * <!--Start PulumiCodeChooser --> *
@@ -6225,7 +7541,11 @@ public static Output getJob(GetJobArgs args, InvokeOptions options
      * import com.pulumi.Pulumi;
      * import com.pulumi.core.Output;
      * import com.pulumi.databricks.DatabricksFunctions;
-     * import com.pulumi.databricks.inputs.GetJobArgs;
+     * import com.pulumi.databricks.inputs.GetJobsArgs;
+     * import com.pulumi.databricks.Permissions;
+     * import com.pulumi.databricks.PermissionsArgs;
+     * import com.pulumi.databricks.inputs.PermissionsAccessControlArgs;
+     * import com.pulumi.codegen.internal.KeyedValue;
      * import java.util.List;
      * import java.util.ArrayList;
      * import java.util.Map;
@@ -6239,11 +7559,59 @@ public static Output getJob(GetJobArgs args, InvokeOptions options
      *     }
      * 
      *     public static void stack(Context ctx) {
-     *         final var this = DatabricksFunctions.getJob(GetJobArgs.builder()
-     *             .jobName("My job")
-     *             .build());
+     *         final var this_ = DatabricksFunctions.getJobs();
     * 
-     *         ctx.export("jobNumWorkers", this_.jobSettings().settings().newCluster().numWorkers());
+     *         final var everyoneCanViewAllJobs = this_.applyValue(getJobsResult -> {
+     *             final var resources = new ArrayList();
+     *             for (var range : KeyedValue.of(getJobsResult.ids())) {
+     *                 var resource = new Permissions("everyoneCanViewAllJobs-" + range.key(), PermissionsArgs.builder()
+     *                     .jobId(range.value())
+     *                     .accessControls(PermissionsAccessControlArgs.builder()
+     *                         .groupName("users")
+     *                         .permissionLevel("CAN_VIEW")
+     *                         .build())
+     *                     .build());
+     * 
+     *                 resources.add(resource);
+     *             }
+     * 
+     *             return resources;
+     *         });
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * Getting ID of specific databricks.Job by name: + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetJobsArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var this_ = DatabricksFunctions.getJobs();
+     * 
+     *         ctx.export("x", String.format("ID of `x` job is %s", this_.ids().x()));
      *     }
      * }
      * }
@@ -6254,12 +7622,11 @@ public static Output getJob(GetJobArgs args, InvokeOptions options
      * 
      * The following resources are used in the same context:
      * 
-     * * databricks.getJobs data to get all jobs and their names from a workspace.
      * * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster.
      * 
      */
-    public static CompletableFuture getJobPlain(GetJobPlainArgs args, InvokeOptions options) {
-        return Deployment.getInstance().invokeAsync("databricks:index/getJob:getJob", TypeShape.of(GetJobResult.class), args, Utilities.withVersion(options));
+    public static Output getJobs() {
+        return getJobs(GetJobsArgs.Empty, InvokeOptions.Empty);
     }
     /**
      * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors.
@@ -6365,8 +7732,8 @@ public static CompletableFuture getJobPlain(GetJobPlainArgs args,
      * * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster.
      * 
      */
-    public static Output getJobs() {
-        return getJobs(GetJobsArgs.Empty, InvokeOptions.Empty);
+    public static CompletableFuture getJobsPlain() {
+        return getJobsPlain(GetJobsPlainArgs.Empty, InvokeOptions.Empty);
     }
     /**
      * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors.
@@ -6472,8 +7839,8 @@ public static Output getJobs() {
      * * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster.
      * 
      */
-    public static CompletableFuture getJobsPlain() {
-        return getJobsPlain(GetJobsPlainArgs.Empty, InvokeOptions.Empty);
+    public static Output getJobs(GetJobsArgs args) {
+        return getJobs(args, InvokeOptions.Empty);
     }
     /**
      * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors.
@@ -6579,8 +7946,8 @@ public static CompletableFuture getJobsPlain() {
      * * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster.
      * 
      */
-    public static Output getJobs(GetJobsArgs args) {
-        return getJobs(args, InvokeOptions.Empty);
+    public static CompletableFuture getJobsPlain(GetJobsPlainArgs args) {
+        return getJobsPlain(args, InvokeOptions.Empty);
     }
     /**
      * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors.
@@ -6686,8 +8053,8 @@ public static Output getJobs(GetJobsArgs args) {
      * * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster.
      * 
      */
-    public static CompletableFuture getJobsPlain(GetJobsPlainArgs args) {
-        return getJobsPlain(args, InvokeOptions.Empty);
+    public static Output getJobs(GetJobsArgs args, InvokeOptions options) {
+        return Deployment.getInstance().invoke("databricks:index/getJobs:getJobs", TypeShape.of(GetJobsResult.class), args, Utilities.withVersion(options));
     }
     /**
      * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors.
@@ -6793,7 +8160,7 @@ public static CompletableFuture getJobsPlain(GetJobsPlainArgs arg
      * * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster.
      * 
      */
-    public static Output getJobs(GetJobsArgs args, InvokeOptions options) {
+    public static Output getJobs(GetJobsArgs args, InvokeOutputOptions options) {
         return Deployment.getInstance().invoke("databricks:index/getJobs:getJobs", TypeShape.of(GetJobsResult.class), args, Utilities.withVersion(options));
     }
     /**
@@ -7277,7 +8644,153 @@ public static Output getMetastore(GetMetastoreArgs args, Inv
      * 
      * ## Example Usage
      * 
-     * MetastoreInfo response for a given metastore id
+     * MetastoreInfo response for a given metastore id
+     * 
+     * <!--Start PulumiCodeChooser -->
+     * 
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.aws.s3.BucketV2;
+     * import com.pulumi.aws.s3.BucketV2Args;
+     * import com.pulumi.databricks.Metastore;
+     * import com.pulumi.databricks.MetastoreArgs;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetMetastoreArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         var metastore = new BucketV2("metastore", BucketV2Args.builder()
+     *             .bucket(String.format("%s-metastore", prefix))
+     *             .forceDestroy(true)
+     *             .build());
+     * 
+     *         var thisMetastore = new Metastore("thisMetastore", MetastoreArgs.builder()
+     *             .name("primary")
+     *             .storageRoot(metastore.id().applyValue(id -> String.format("s3://%s/metastore", id)))
+     *             .owner(unityAdminGroup)
+     *             .forceDestroy(true)
+     *             .build());
+     * 
+     *         final var this_ = DatabricksFunctions.getMetastore(GetMetastoreArgs.builder()
+     *             .metastoreId(thisMetastore.id())
+     *             .build());
+     * 
+     *         ctx.export("someMetastore", this_.applyValue(this_ -> this_.metastoreInfo()));
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.getMetastores to get mapping of name to id of all metastores. + * * databricks.Metastore to manage Metastores within Unity Catalog. + * * databricks.Catalog to manage catalogs within Unity Catalog. + * + */ + public static Output getMetastore(GetMetastoreArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getMetastore:getMetastore", TypeShape.of(GetMetastoreResult.class), args, Utilities.withVersion(options)); + } + /** + * > **Note** This data source can only be used with an account-level provider! + * + * Retrieves information about metastore for a given id of databricks.Metastore object, that was created by Pulumi or manually, so that special handling could be applied. + * + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _authentication is not configured for provider_ errors. + * + * ## Example Usage + * + * MetastoreInfo response for a given metastore id + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.aws.s3.BucketV2;
+     * import com.pulumi.aws.s3.BucketV2Args;
+     * import com.pulumi.databricks.Metastore;
+     * import com.pulumi.databricks.MetastoreArgs;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetMetastoreArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         var metastore = new BucketV2("metastore", BucketV2Args.builder()
+     *             .bucket(String.format("%s-metastore", prefix))
+     *             .forceDestroy(true)
+     *             .build());
+     * 
+     *         var thisMetastore = new Metastore("thisMetastore", MetastoreArgs.builder()
+     *             .name("primary")
+     *             .storageRoot(metastore.id().applyValue(id -> String.format("s3://%s/metastore", id)))
+     *             .owner(unityAdminGroup)
+     *             .forceDestroy(true)
+     *             .build());
+     * 
+     *         final var this_ = DatabricksFunctions.getMetastore(GetMetastoreArgs.builder()
+     *             .metastoreId(thisMetastore.id())
+     *             .build());
+     * 
+     *         ctx.export("someMetastore", this_.applyValue(this_ -> this_.metastoreInfo()));
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.getMetastores to get mapping of name to id of all metastores. + * * databricks.Metastore to manage Metastores within Unity Catalog. + * * databricks.Catalog to manage catalogs within Unity Catalog. + * + */ + public static CompletableFuture getMetastorePlain(GetMetastorePlainArgs args, InvokeOptions options) { + return Deployment.getInstance().invokeAsync("databricks:index/getMetastore:getMetastore", TypeShape.of(GetMetastoreResult.class), args, Utilities.withVersion(options)); + } + /** + * > **Note** This data source can only be used with an account-level provider! + * + * Retrieves a mapping of name to id of databricks.Metastore objects, that were created by Pulumi or manually, so that special handling could be applied. + * + * > **Note** `account_id` provider configuration property is required for this resource to work. Data resource will error in case of metastores with duplicate names. This data source is only available for users & service principals with account admin status + * + * ## Example Usage + * + * Mapping of name to id of all metastores: * * <!--Start PulumiCodeChooser --> *
@@ -7287,12 +8800,8 @@ public static Output getMetastore(GetMetastoreArgs args, Inv
      * import com.pulumi.Context;
      * import com.pulumi.Pulumi;
      * import com.pulumi.core.Output;
-     * import com.pulumi.aws.s3.BucketV2;
-     * import com.pulumi.aws.s3.BucketV2Args;
-     * import com.pulumi.databricks.Metastore;
-     * import com.pulumi.databricks.MetastoreArgs;
      * import com.pulumi.databricks.DatabricksFunctions;
-     * import com.pulumi.databricks.inputs.GetMetastoreArgs;
+     * import com.pulumi.databricks.inputs.GetMetastoresArgs;
      * import java.util.List;
      * import java.util.ArrayList;
      * import java.util.Map;
@@ -7306,23 +8815,9 @@ public static Output getMetastore(GetMetastoreArgs args, Inv
      *     }
      * 
      *     public static void stack(Context ctx) {
-     *         var metastore = new BucketV2("metastore", BucketV2Args.builder()
-     *             .bucket(String.format("%s-metastore", prefix))
-     *             .forceDestroy(true)
-     *             .build());
-     * 
-     *         var thisMetastore = new Metastore("thisMetastore", MetastoreArgs.builder()
-     *             .name("primary")
-     *             .storageRoot(metastore.id().applyValue(id -> String.format("s3://%s/metastore", id)))
-     *             .owner(unityAdminGroup)
-     *             .forceDestroy(true)
-     *             .build());
-     * 
-     *         final var this = DatabricksFunctions.getMetastore(GetMetastoreArgs.builder()
-     *             .metastoreId(thisMetastore.id())
-     *             .build());
+     *         final var all = DatabricksFunctions.getMetastores();
      * 
-     *         ctx.export("someMetastore", this_.applyValue(this_ -> this_.metastoreInfo()));
+     *         ctx.export("allMetastores", all.applyValue(getMetastoresResult -> getMetastoresResult.ids()));
      *     }
      * }
      * }
@@ -7333,13 +8828,13 @@ public static Output getMetastore(GetMetastoreArgs args, Inv
      * 
      * The following resources are used in the same context:
      * 
-     * * databricks.getMetastores to get mapping of name to id of all metastores.
+     * * databricks.getMetastore to get information about a single metastore.
      * * databricks.Metastore to manage Metastores within Unity Catalog.
      * * databricks.Catalog to manage catalogs within Unity Catalog.
      * 
      */
-    public static CompletableFuture getMetastorePlain(GetMetastorePlainArgs args, InvokeOptions options) {
-        return Deployment.getInstance().invokeAsync("databricks:index/getMetastore:getMetastore", TypeShape.of(GetMetastoreResult.class), args, Utilities.withVersion(options));
+    public static Output getMetastores() {
+        return getMetastores(GetMetastoresArgs.Empty, InvokeOptions.Empty);
     }
     /**
      * > **Note** This data source can only be used with an account-level provider!
@@ -7393,8 +8888,8 @@ public static CompletableFuture getMetastorePlain(GetMetasto
      * * databricks.Catalog to manage catalogs within Unity Catalog.
      * 
      */
-    public static Output getMetastores() {
-        return getMetastores(GetMetastoresArgs.Empty, InvokeOptions.Empty);
+    public static CompletableFuture getMetastoresPlain() {
+        return getMetastoresPlain(GetMetastoresPlainArgs.Empty, InvokeOptions.Empty);
     }
     /**
      * > **Note** This data source can only be used with an account-level provider!
@@ -7448,8 +8943,8 @@ public static Output getMetastores() {
      * * databricks.Catalog to manage catalogs within Unity Catalog.
      * 
      */
-    public static CompletableFuture getMetastoresPlain() {
-        return getMetastoresPlain(GetMetastoresPlainArgs.Empty, InvokeOptions.Empty);
+    public static Output getMetastores(GetMetastoresArgs args) {
+        return getMetastores(args, InvokeOptions.Empty);
     }
     /**
      * > **Note** This data source can only be used with an account-level provider!
@@ -7503,8 +8998,8 @@ public static CompletableFuture getMetastoresPlain() {
      * * databricks.Catalog to manage catalogs within Unity Catalog.
      * 
      */
-    public static Output getMetastores(GetMetastoresArgs args) {
-        return getMetastores(args, InvokeOptions.Empty);
+    public static CompletableFuture getMetastoresPlain(GetMetastoresPlainArgs args) {
+        return getMetastoresPlain(args, InvokeOptions.Empty);
     }
     /**
      * > **Note** This data source can only be used with an account-level provider!
@@ -7558,8 +9053,8 @@ public static Output getMetastores(GetMetastoresArgs args)
      * * databricks.Catalog to manage catalogs within Unity Catalog.
      * 
      */
-    public static CompletableFuture getMetastoresPlain(GetMetastoresPlainArgs args) {
-        return getMetastoresPlain(args, InvokeOptions.Empty);
+    public static Output getMetastores(GetMetastoresArgs args, InvokeOptions options) {
+        return Deployment.getInstance().invoke("databricks:index/getMetastores:getMetastores", TypeShape.of(GetMetastoresResult.class), args, Utilities.withVersion(options));
     }
     /**
      * > **Note** This data source can only be used with an account-level provider!
@@ -7613,7 +9108,7 @@ public static CompletableFuture getMetastoresPlain(GetMetas
      * * databricks.Catalog to manage catalogs within Unity Catalog.
      * 
      */
-    public static Output getMetastores(GetMetastoresArgs args, InvokeOptions options) {
+    public static Output getMetastores(GetMetastoresArgs args, InvokeOutputOptions options) {
         return Deployment.getInstance().invoke("databricks:index/getMetastores:getMetastores", TypeShape.of(GetMetastoresResult.class), args, Utilities.withVersion(options));
     }
     /**
@@ -7716,6 +9211,15 @@ public static CompletableFuture getMlflowExperimentPl
     public static Output getMlflowExperiment(GetMlflowExperimentArgs args, InvokeOptions options) {
         return Deployment.getInstance().invoke("databricks:index/getMlflowExperiment:getMlflowExperiment", TypeShape.of(GetMlflowExperimentResult.class), args, Utilities.withVersion(options));
     }
+    /**
+     * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors.
+     * 
+     * Retrieves the settings of databricks.MlflowExperiment by id or name.
+     * 
+     */
+    public static Output getMlflowExperiment(GetMlflowExperimentArgs args, InvokeOutputOptions options) {
+        return Deployment.getInstance().invoke("databricks:index/getMlflowExperiment:getMlflowExperiment", TypeShape.of(GetMlflowExperimentResult.class), args, Utilities.withVersion(options));
+    }
     /**
      * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors.
      * 
@@ -8166,6 +9670,117 @@ public static Output getMlflowModel(GetMlflowModelArgs arg
      * <!--End PulumiCodeChooser -->
      * 
      */
+    public static Output getMlflowModel(GetMlflowModelArgs args, InvokeOutputOptions options) {
+        return Deployment.getInstance().invoke("databricks:index/getMlflowModel:getMlflowModel", TypeShape.of(GetMlflowModelResult.class), args, Utilities.withVersion(options));
+    }
+    /**
+     * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors.
+     * 
+     * Retrieves the settings of databricks.MlflowModel by name.
+     * 
+     * ## Example Usage
+     * 
+     * <!--Start PulumiCodeChooser -->
+     * 
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.MlflowModel;
+     * import com.pulumi.databricks.MlflowModelArgs;
+     * import com.pulumi.databricks.inputs.MlflowModelTagArgs;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetMlflowModelArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         var thisMlflowModel = new MlflowModel("thisMlflowModel", MlflowModelArgs.builder()
+     *             .name("My MLflow Model")
+     *             .description("My MLflow model description")
+     *             .tags(            
+     *                 MlflowModelTagArgs.builder()
+     *                     .key("key1")
+     *                     .value("value1")
+     *                     .build(),
+     *                 MlflowModelTagArgs.builder()
+     *                     .key("key2")
+     *                     .value("value2")
+     *                     .build())
+     *             .build());
+     * 
+     *         final var this_ = DatabricksFunctions.getMlflowModel(GetMlflowModelArgs.builder()
+     *             .name("My MLflow Model")
+     *             .build());
+     * 
+     *         ctx.export("model", this_);
+     *     }
+     * }
+     * }
+     * 
+     * <!--End PulumiCodeChooser -->
+     * 
+     * <!--Start PulumiCodeChooser -->
+     * 
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetMlflowModelArgs;
+     * import com.pulumi.databricks.ModelServing;
+     * import com.pulumi.databricks.ModelServingArgs;
+     * import com.pulumi.databricks.inputs.ModelServingConfigArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var this_ = DatabricksFunctions.getMlflowModel(GetMlflowModelArgs.builder()
+     *             .name("My MLflow Model with multiple versions")
+     *             .build());
+     * 
+     *         var thisModelServing = new ModelServing("thisModelServing", ModelServingArgs.builder()
+     *             .name("model-serving-endpoint")
+     *             .config(ModelServingConfigArgs.builder()
+     *                 .servedModels(ModelServingConfigServedModelArgs.builder()
+     *                     .name("model_serving_prod")
+     *                     .modelName(this_.name())
+     *                     .modelVersion(this_.latestVersions()[0].version())
+     *                     .workloadSize("Small")
+     *                     .scaleToZeroEnabled(true)
+     *                     .build())
+     *                 .build())
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + */ public static CompletableFuture getMlflowModelPlain(GetMlflowModelPlainArgs args, InvokeOptions options) { return Deployment.getInstance().invokeAsync("databricks:index/getMlflowModel:getMlflowModel", TypeShape.of(GetMlflowModelResult.class), args, Utilities.withVersion(options)); } @@ -8400,7 +10015,97 @@ public static Output getMlflowModels(GetMlflowModelsArgs * import com.pulumi.Pulumi; * import com.pulumi.core.Output; * import com.pulumi.databricks.DatabricksFunctions; - * import com.pulumi.databricks.inputs.GetMlflowModelsArgs; + * import com.pulumi.databricks.inputs.GetMlflowModelsArgs; + * import java.util.List; + * import java.util.ArrayList; + * import java.util.Map; + * import java.io.File; + * import java.nio.file.Files; + * import java.nio.file.Paths; + * + * public class App { + * public static void main(String[] args) { + * Pulumi.run(App::stack); + * } + * + * public static void stack(Context ctx) { + * final var this = DatabricksFunctions.getMlflowModels(); + * + * ctx.export("model", this_); + * } + * } + * } + *
+ * <!--End PulumiCodeChooser --> + * + */ + public static Output getMlflowModels(GetMlflowModelsArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getMlflowModels:getMlflowModels", TypeShape.of(GetMlflowModelsResult.class), args, Utilities.withVersion(options)); + } + /** + * > **Note** This data source could be only used with workspace-level provider! + * + * Retrieves a list of databricks.MlflowModel objects, that were created by Pulumi or manually, so that special handling could be applied. + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetMlflowModelsArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var this_ = DatabricksFunctions.getMlflowModels();
+     * 
+     *         ctx.export("model", this_);
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + */ + public static CompletableFuture getMlflowModelsPlain(GetMlflowModelsPlainArgs args, InvokeOptions options) { + return Deployment.getInstance().invokeAsync("databricks:index/getMlflowModels:getMlflowModels", TypeShape.of(GetMlflowModelsResult.class), args, Utilities.withVersion(options)); + } + /** + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + * + * Lists all databricks.MwsCredentials in Databricks Account. + * + * > **Note** `account_id` provider configuration property is required for this resource to work. + * + * ## Example Usage + * + * Listing all credentials in Databricks Account + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetMwsCredentialsArgs;
      * import java.util.List;
      * import java.util.ArrayList;
      * import java.util.Map;
@@ -8414,18 +10119,29 @@ public static Output getMlflowModels(GetMlflowModelsArgs
      *     }
      * 
      *     public static void stack(Context ctx) {
-     *         final var this = DatabricksFunctions.getMlflowModels();
+     *         final var all = DatabricksFunctions.getMwsCredentials();
      * 
-     *         ctx.export("model", this_);
+     *         ctx.export("allMwsCredentials", all.applyValue(getMwsCredentialsResult -> getMwsCredentialsResult.ids()));
      *     }
      * }
      * }
      * 
* <!--End PulumiCodeChooser --> * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * Provisioning Databricks on AWS guide. + * * databricks.MwsCustomerManagedKeys to configure KMS keys for new workspaces within AWS. + * * databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). + * * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. + * * databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS. + * * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * */ - public static CompletableFuture getMlflowModelsPlain(GetMlflowModelsPlainArgs args, InvokeOptions options) { - return Deployment.getInstance().invokeAsync("databricks:index/getMlflowModels:getMlflowModels", TypeShape.of(GetMlflowModelsResult.class), args, Utilities.withVersion(options)); + public static Output getMwsCredentials() { + return getMwsCredentials(GetMwsCredentialsArgs.Empty, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -8482,8 +10198,8 @@ public static CompletableFuture getMlflowModelsPlain(GetM * * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). 
* */ - public static Output getMwsCredentials() { - return getMwsCredentials(GetMwsCredentialsArgs.Empty, InvokeOptions.Empty); + public static CompletableFuture getMwsCredentialsPlain() { + return getMwsCredentialsPlain(GetMwsCredentialsPlainArgs.Empty, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -8540,8 +10256,8 @@ public static Output getMwsCredentials() { * * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). * */ - public static CompletableFuture getMwsCredentialsPlain() { - return getMwsCredentialsPlain(GetMwsCredentialsPlainArgs.Empty, InvokeOptions.Empty); + public static Output getMwsCredentials(GetMwsCredentialsArgs args) { + return getMwsCredentials(args, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -8598,8 +10314,8 @@ public static CompletableFuture getMwsCredentialsPlain( * * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). 
* */ - public static Output getMwsCredentials(GetMwsCredentialsArgs args) { - return getMwsCredentials(args, InvokeOptions.Empty); + public static CompletableFuture getMwsCredentialsPlain(GetMwsCredentialsPlainArgs args) { + return getMwsCredentialsPlain(args, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -8656,8 +10372,8 @@ public static Output getMwsCredentials(GetMwsCredential * * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). * */ - public static CompletableFuture getMwsCredentialsPlain(GetMwsCredentialsPlainArgs args) { - return getMwsCredentialsPlain(args, InvokeOptions.Empty); + public static Output getMwsCredentials(GetMwsCredentialsArgs args, InvokeOptions options) { + return Deployment.getInstance().invoke("databricks:index/getMwsCredentials:getMwsCredentials", TypeShape.of(GetMwsCredentialsResult.class), args, Utilities.withVersion(options)); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -8714,7 +10430,7 @@ public static CompletableFuture getMwsCredentialsPlain( * * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). 
* */ - public static Output getMwsCredentials(GetMwsCredentialsArgs args, InvokeOptions options) { + public static Output getMwsCredentials(GetMwsCredentialsArgs args, InvokeOutputOptions options) { return Deployment.getInstance().invoke("databricks:index/getMwsCredentials:getMwsCredentials", TypeShape.of(GetMwsCredentialsResult.class), args, Utilities.withVersion(options)); } /** @@ -9045,6 +10761,60 @@ public static CompletableFuture getMwsWorkspacesPlain(Ge public static Output getMwsWorkspaces(GetMwsWorkspacesArgs args, InvokeOptions options) { return Deployment.getInstance().invoke("databricks:index/getMwsWorkspaces:getMwsWorkspaces", TypeShape.of(GetMwsWorkspacesResult.class), args, Utilities.withVersion(options)); } + /** + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + * + * Lists all databricks.MwsWorkspaces in Databricks Account. + * + * > **Note** `account_id` provider configuration property is required for this resource to work. + * + * ## Example Usage + * + * Listing all workspaces in + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetMwsWorkspacesArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var all = DatabricksFunctions.getMwsWorkspaces();
+     * 
+     *         ctx.export("allMwsWorkspaces", all.applyValue(getMwsWorkspacesResult -> getMwsWorkspacesResult.ids()));
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.MwsWorkspaces to manage Databricks Workspaces on AWS and GCP. + * * databricks.MetastoreAssignment to assign databricks.Metastore to databricks.MwsWorkspaces or azurerm_databricks_workspace + * + */ + public static Output getMwsWorkspaces(GetMwsWorkspacesArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getMwsWorkspaces:getMwsWorkspaces", TypeShape.of(GetMwsWorkspacesResult.class), args, Utilities.withVersion(options)); + } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. * @@ -9570,8 +11340,177 @@ public static Output getNodeType(GetNodeTypeArgs args, Invoke * * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster. * */ - public static CompletableFuture getNodeTypePlain(GetNodeTypePlainArgs args, InvokeOptions options) { - return Deployment.getInstance().invokeAsync("databricks:index/getNodeType:getNodeType", TypeShape.of(GetNodeTypeResult.class), args, Utilities.withVersion(options)); + public static Output getNodeType(GetNodeTypeArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getNodeType:getNodeType", TypeShape.of(GetNodeTypeResult.class), args, Utilities.withVersion(options)); + } + /** + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. 
+ * + * Gets the smallest node type for databricks.Cluster that fits search criteria, like amount of RAM or number of cores. [AWS](https://databricks.com/product/aws-pricing/instance-types) or [Azure](https://azure.microsoft.com/en-us/pricing/details/databricks/). Internally data source fetches [node types](https://docs.databricks.com/dev-tools/api/latest/clusters.html#list-node-types) available per cloud, similar to executing `databricks clusters list-node-types`, and filters it to return the smallest possible node with criteria. + * + * > **Note** This is experimental functionality, which aims to simplify things. In case of wrong parameters given (e.g. `min_gpus = 876`) or no nodes matching, data source will return cloud-default node type, even though it doesn't match search criteria specified by data source arguments: [i3.xlarge](https://aws.amazon.com/ec2/instance-types/i3/) for AWS or [Standard_D3_v2](https://docs.microsoft.com/en-us/azure/cloud-services/cloud-services-sizes-specs#dv2-series) for Azure. + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetNodeTypeArgs;
+     * import com.pulumi.databricks.inputs.GetSparkVersionArgs;
+     * import com.pulumi.databricks.Cluster;
+     * import com.pulumi.databricks.ClusterArgs;
+     * import com.pulumi.databricks.inputs.ClusterAutoscaleArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var withGpu = DatabricksFunctions.getNodeType(GetNodeTypeArgs.builder()
+     *             .localDisk(true)
+     *             .minCores(16)
+     *             .gbPerCore(1)
+     *             .minGpus(1)
+     *             .build());
+     * 
+     *         final var gpuMl = DatabricksFunctions.getSparkVersion(GetSparkVersionArgs.builder()
+     *             .gpu(true)
+     *             .ml(true)
+     *             .build());
+     * 
+     *         var research = new Cluster("research", ClusterArgs.builder()
+     *             .clusterName("Research Cluster")
+     *             .sparkVersion(gpuMl.applyValue(getSparkVersionResult -> getSparkVersionResult.id()))
+     *             .nodeTypeId(withGpu.applyValue(getNodeTypeResult -> getNodeTypeResult.id()))
+     *             .autoterminationMinutes(20)
+     *             .autoscale(ClusterAutoscaleArgs.builder()
+     *                 .minWorkers(1)
+     *                 .maxWorkers(50)
+     *                 .build())
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * End to end workspace management guide. + * * databricks.Cluster to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html). + * * databricks.ClusterPolicy to create a databricks.Cluster policy, which limits the ability to create clusters based on a set of rules. + * * databricks.InstancePool to manage [instance pools](https://docs.databricks.com/clusters/instance-pools/index.html) to reduce cluster start and auto-scaling times by maintaining a set of idle, ready-to-use instances. + * * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster. + * + */ + public static CompletableFuture getNodeTypePlain(GetNodeTypePlainArgs args, InvokeOptions options) { + return Deployment.getInstance().invokeAsync("databricks:index/getNodeType:getNodeType", TypeShape.of(GetNodeTypeResult.class), args, Utilities.withVersion(options)); + } + /** + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + * + * This data source allows to export a notebook from Databricks Workspace. + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetNotebookArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var features = DatabricksFunctions.getNotebook(GetNotebookArgs.builder()
+     *             .path("/Production/Features")
+     *             .format("SOURCE")
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + */ + public static Output getNotebook(GetNotebookArgs args) { + return getNotebook(args, InvokeOptions.Empty); + } + /** + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + * + * This data source allows to export a notebook from Databricks Workspace. + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetNotebookArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var features = DatabricksFunctions.getNotebook(GetNotebookArgs.builder()
+     *             .path("/Production/Features")
+     *             .format("SOURCE")
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + */ + public static CompletableFuture getNotebookPlain(GetNotebookPlainArgs args) { + return getNotebookPlain(args, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -9615,8 +11554,8 @@ public static CompletableFuture getNodeTypePlain(GetNodeTypeP * <!--End PulumiCodeChooser --> * */ - public static Output getNotebook(GetNotebookArgs args) { - return getNotebook(args, InvokeOptions.Empty); + public static Output getNotebook(GetNotebookArgs args, InvokeOptions options) { + return Deployment.getInstance().invoke("databricks:index/getNotebook:getNotebook", TypeShape.of(GetNotebookResult.class), args, Utilities.withVersion(options)); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -9660,8 +11599,8 @@ public static Output getNotebook(GetNotebookArgs args) { * <!--End PulumiCodeChooser --> * */ - public static CompletableFuture getNotebookPlain(GetNotebookPlainArgs args) { - return getNotebookPlain(args, InvokeOptions.Empty); + public static Output getNotebook(GetNotebookArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getNotebook:getNotebook", TypeShape.of(GetNotebookResult.class), args, Utilities.withVersion(options)); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. 
@@ -9705,13 +11644,13 @@ public static CompletableFuture getNotebookPlain(GetNotebookP * <!--End PulumiCodeChooser --> * */ - public static Output getNotebook(GetNotebookArgs args, InvokeOptions options) { - return Deployment.getInstance().invoke("databricks:index/getNotebook:getNotebook", TypeShape.of(GetNotebookResult.class), args, Utilities.withVersion(options)); + public static CompletableFuture getNotebookPlain(GetNotebookPlainArgs args, InvokeOptions options) { + return Deployment.getInstance().invokeAsync("databricks:index/getNotebook:getNotebook", TypeShape.of(GetNotebookResult.class), args, Utilities.withVersion(options)); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. * - * This data source allows to export a notebook from Databricks Workspace. + * This data source allows to list notebooks in the Databricks Workspace. 
* * ## Example Usage * @@ -9724,7 +11663,7 @@ public static Output getNotebook(GetNotebookArgs args, Invoke * import com.pulumi.Pulumi; * import com.pulumi.core.Output; * import com.pulumi.databricks.DatabricksFunctions; - * import com.pulumi.databricks.inputs.GetNotebookArgs; + * import com.pulumi.databricks.inputs.GetNotebookPathsArgs; * import java.util.List; * import java.util.ArrayList; * import java.util.Map; @@ -9738,9 +11677,9 @@ public static Output getNotebook(GetNotebookArgs args, Invoke * } * * public static void stack(Context ctx) { - * final var features = DatabricksFunctions.getNotebook(GetNotebookArgs.builder() - * .path("/Production/Features") - * .format("SOURCE") + * final var prod = DatabricksFunctions.getNotebookPaths(GetNotebookPathsArgs.builder() + * .path("/Production") + * .recursive(true) * .build()); * * } @@ -9750,8 +11689,8 @@ public static Output getNotebook(GetNotebookArgs args, Invoke * <!--End PulumiCodeChooser --> * */ - public static CompletableFuture getNotebookPlain(GetNotebookPlainArgs args, InvokeOptions options) { - return Deployment.getInstance().invokeAsync("databricks:index/getNotebook:getNotebook", TypeShape.of(GetNotebookResult.class), args, Utilities.withVersion(options)); + public static Output getNotebookPaths(GetNotebookPathsArgs args) { + return getNotebookPaths(args, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. 
@@ -9795,8 +11734,8 @@ public static CompletableFuture getNotebookPlain(GetNotebookP * <!--End PulumiCodeChooser --> * */ - public static Output getNotebookPaths(GetNotebookPathsArgs args) { - return getNotebookPaths(args, InvokeOptions.Empty); + public static CompletableFuture getNotebookPathsPlain(GetNotebookPathsPlainArgs args) { + return getNotebookPathsPlain(args, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -9840,8 +11779,8 @@ public static Output getNotebookPaths(GetNotebookPathsAr * <!--End PulumiCodeChooser --> * */ - public static CompletableFuture getNotebookPathsPlain(GetNotebookPathsPlainArgs args) { - return getNotebookPathsPlain(args, InvokeOptions.Empty); + public static Output getNotebookPaths(GetNotebookPathsArgs args, InvokeOptions options) { + return Deployment.getInstance().invoke("databricks:index/getNotebookPaths:getNotebookPaths", TypeShape.of(GetNotebookPathsResult.class), args, Utilities.withVersion(options)); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. 
@@ -9885,7 +11824,7 @@ public static CompletableFuture getNotebookPathsPlain(Ge * <!--End PulumiCodeChooser --> * */ - public static Output getNotebookPaths(GetNotebookPathsArgs args, InvokeOptions options) { + public static Output getNotebookPaths(GetNotebookPathsArgs args, InvokeOutputOptions options) { return Deployment.getInstance().invoke("databricks:index/getNotebookPaths:getNotebookPaths", TypeShape.of(GetNotebookPathsResult.class), args, Utilities.withVersion(options)); } /** @@ -10273,20 +12212,234 @@ public static CompletableFuture getNotificati * .type("EMAIL") * .build()); * - * }}{@code - * }}{@code + * }}{@code + * }}{@code + * } + *
+ * <!--End PulumiCodeChooser --> + * + */ + public static Output getNotificationDestinations(GetNotificationDestinationsArgs args, InvokeOptions options) { + return Deployment.getInstance().invoke("databricks:index/getNotificationDestinations:getNotificationDestinations", TypeShape.of(GetNotificationDestinationsResult.class), args, Utilities.withVersion(options)); + } + /** + * This data source allows you to retrieve information about [Notification Destinations](https://docs.databricks.com/api/workspace/notificationdestinations). Notification Destinations are used to send notifications for query alerts and jobs to external systems such as email, Slack, Microsoft Teams, PagerDuty, or generic webhooks. + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.NotificationDestination;
+     * import com.pulumi.databricks.NotificationDestinationArgs;
+     * import com.pulumi.databricks.inputs.NotificationDestinationConfigArgs;
+     * import com.pulumi.databricks.inputs.NotificationDestinationConfigEmailArgs;
+     * import com.pulumi.databricks.inputs.NotificationDestinationConfigSlackArgs;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetNotificationDestinationsArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App }{{@code
+     *     public static void main(String[] args) }{{@code
+     *         Pulumi.run(App::stack);
+     *     }}{@code
+     * 
+     *     public static void stack(Context ctx) }{{@code
+     *         var email = new NotificationDestination("email", NotificationDestinationArgs.builder()
+     *             .displayName("Email Destination")
+     *             .config(NotificationDestinationConfigArgs.builder()
+     *                 .email(NotificationDestinationConfigEmailArgs.builder()
+     *                     .addresses("abc}{@literal @}{@code gmail.com")
+     *                     .build())
+     *                 .build())
+     *             .build());
+     * 
+     *         var slack = new NotificationDestination("slack", NotificationDestinationArgs.builder()
+     *             .displayName("Slack Destination")
+     *             .config(NotificationDestinationConfigArgs.builder()
+     *                 .slack(NotificationDestinationConfigSlackArgs.builder()
+     *                     .url("https://hooks.slack.com/services/...")
+     *                     .build())
+     *                 .build())
+     *             .build());
+     * 
+     *         // Lists all notification destinations
+     *         final var this_ = DatabricksFunctions.getNotificationDestinations();
+     * 
+     *         // List destinations of specific type and name
+     *         final var filteredNotification = DatabricksFunctions.getNotificationDestinations(GetNotificationDestinationsArgs.builder()
+     *             .displayNameContains("Destination")
+     *             .type("EMAIL")
+     *             .build());
+     * 
+     *     }}{@code
+     * }}{@code
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + */ + public static Output getNotificationDestinations(GetNotificationDestinationsArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getNotificationDestinations:getNotificationDestinations", TypeShape.of(GetNotificationDestinationsResult.class), args, Utilities.withVersion(options)); + } + /** + * This data source allows you to retrieve information about [Notification Destinations](https://docs.databricks.com/api/workspace/notificationdestinations). Notification Destinations are used to send notifications for query alerts and jobs to external systems such as email, Slack, Microsoft Teams, PagerDuty, or generic webhooks. + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.NotificationDestination;
+     * import com.pulumi.databricks.NotificationDestinationArgs;
+     * import com.pulumi.databricks.inputs.NotificationDestinationConfigArgs;
+     * import com.pulumi.databricks.inputs.NotificationDestinationConfigEmailArgs;
+     * import com.pulumi.databricks.inputs.NotificationDestinationConfigSlackArgs;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetNotificationDestinationsArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App }{{@code
+     *     public static void main(String[] args) }{{@code
+     *         Pulumi.run(App::stack);
+     *     }}{@code
+     * 
+     *     public static void stack(Context ctx) }{{@code
+     *         var email = new NotificationDestination("email", NotificationDestinationArgs.builder()
+     *             .displayName("Email Destination")
+     *             .config(NotificationDestinationConfigArgs.builder()
+     *                 .email(NotificationDestinationConfigEmailArgs.builder()
+     *                     .addresses("abc}{@literal @}{@code gmail.com")
+     *                     .build())
+     *                 .build())
+     *             .build());
+     * 
+     *         var slack = new NotificationDestination("slack", NotificationDestinationArgs.builder()
+     *             .displayName("Slack Destination")
+     *             .config(NotificationDestinationConfigArgs.builder()
+     *                 .slack(NotificationDestinationConfigSlackArgs.builder()
+     *                     .url("https://hooks.slack.com/services/...")
+     *                     .build())
+     *                 .build())
+     *             .build());
+     * 
+     *         // Lists all notification destinations
+     *         final var this_ = DatabricksFunctions.getNotificationDestinations();
+     * 
+     *         // List destinations of specific type and name
+     *         final var filteredNotification = DatabricksFunctions.getNotificationDestinations(GetNotificationDestinationsArgs.builder()
+     *             .displayNameContains("Destination")
+     *             .type("EMAIL")
+     *             .build());
+     * 
+     *     }}{@code
+     * }}{@code
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + */ + public static CompletableFuture getNotificationDestinationsPlain(GetNotificationDestinationsPlainArgs args, InvokeOptions options) { + return Deployment.getInstance().invokeAsync("databricks:index/getNotificationDestinations:getNotificationDestinations", TypeShape.of(GetNotificationDestinationsResult.class), args, Utilities.withVersion(options)); + } + /** + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _authentication is not configured for provider_ errors. + * + * Retrieves a list of all databricks.Pipeline ([Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html)) ids deployed in a workspace, or those matching the provided search term. Maximum 100 results. + * + * ## Example Usage + * + * Get all Delta Live Tables pipelines: + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetPipelinesArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var all = DatabricksFunctions.getPipelines();
+     * 
+     *         ctx.export("allPipelines", all.applyValue(getPipelinesResult -> getPipelinesResult.ids()));
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * Filter Delta Live Tables pipelines by name (exact match): + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetPipelinesArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var this_ = DatabricksFunctions.getPipelines(GetPipelinesArgs.builder()
+     *             .pipelineName("my_pipeline")
+     *             .build());
+     * 
+     *         ctx.export("myPipeline", this_.ids());
+     *     }
+     * }
      * }
      * 
* <!--End PulumiCodeChooser --> * - */ - public static Output getNotificationDestinations(GetNotificationDestinationsArgs args, InvokeOptions options) { - return Deployment.getInstance().invoke("databricks:index/getNotificationDestinations:getNotificationDestinations", TypeShape.of(GetNotificationDestinationsResult.class), args, Utilities.withVersion(options)); - } - /** - * This data source allows you to retrieve information about [Notification Destinations](https://docs.databricks.com/api/workspace/notificationdestinations). Notification Destinations are used to send notifications for query alerts and jobs to external systems such as email, Slack, Microsoft Teams, PagerDuty, or generic webhooks. - * - * ## Example Usage + * Filter Delta Live Tables pipelines by name (wildcard search): * * <!--Start PulumiCodeChooser --> *
@@ -10296,13 +12449,8 @@ public static Output getNotificationDestinati
      * import com.pulumi.Context;
      * import com.pulumi.Pulumi;
      * import com.pulumi.core.Output;
-     * import com.pulumi.databricks.NotificationDestination;
-     * import com.pulumi.databricks.NotificationDestinationArgs;
-     * import com.pulumi.databricks.inputs.NotificationDestinationConfigArgs;
-     * import com.pulumi.databricks.inputs.NotificationDestinationConfigEmailArgs;
-     * import com.pulumi.databricks.inputs.NotificationDestinationConfigSlackArgs;
      * import com.pulumi.databricks.DatabricksFunctions;
-     * import com.pulumi.databricks.inputs.GetNotificationDestinationsArgs;
+     * import com.pulumi.databricks.inputs.GetPipelinesArgs;
      * import java.util.List;
      * import java.util.ArrayList;
      * import java.util.Map;
@@ -10310,48 +12458,36 @@ public static Output getNotificationDestinati
      * import java.nio.file.Files;
      * import java.nio.file.Paths;
      * 
-     * public class App }{{@code
-     *     public static void main(String[] args) }{{@code
+     * public class App {
+     *     public static void main(String[] args) {
      *         Pulumi.run(App::stack);
-     *     }}{@code
-     * 
-     *     public static void stack(Context ctx) }{{@code
-     *         var email = new NotificationDestination("email", NotificationDestinationArgs.builder()
-     *             .displayName("Email Destination")
-     *             .config(NotificationDestinationConfigArgs.builder()
-     *                 .email(NotificationDestinationConfigEmailArgs.builder()
-     *                     .addresses("abc}{@literal @}{@code gmail.com")
-     *                     .build())
-     *                 .build())
-     *             .build());
-     * 
-     *         var slack = new NotificationDestination("slack", NotificationDestinationArgs.builder()
-     *             .displayName("Slack Destination")
-     *             .config(NotificationDestinationConfigArgs.builder()
-     *                 .slack(NotificationDestinationConfigSlackArgs.builder()
-     *                     .url("https://hooks.slack.com/services/...")
-     *                     .build())
-     *                 .build())
-     *             .build());
-     * 
-     *         // Lists all notification desitnations
-     *         final var this = DatabricksFunctions.getNotificationDestinations();
+     *     }
      * 
-     *         // List destinations of specific type and name
-     *         final var filteredNotification = DatabricksFunctions.getNotificationDestinations(GetNotificationDestinationsArgs.builder()
-     *             .displayNameContains("Destination")
-     *             .type("EMAIL")
+     *     public static void stack(Context ctx) {
+     *         final var this_ = DatabricksFunctions.getPipelines(GetPipelinesArgs.builder()
+     *             .pipelineName("%pipeline%")
      *             .build());
      * 
-     *     }}{@code
-     * }}{@code
+     *         ctx.export("wildcardPipelines", this_.ids());
+     *     }
+     * }
      * }
      * 
* <!--End PulumiCodeChooser --> * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * End to end workspace management guide. + * * databricks.Pipeline to deploy [Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html). + * * databricks.Cluster to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html). + * * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster. + * * databricks.Notebook to manage [Databricks Notebooks](https://docs.databricks.com/notebooks/index.html). + * */ - public static CompletableFuture getNotificationDestinationsPlain(GetNotificationDestinationsPlainArgs args, InvokeOptions options) { - return Deployment.getInstance().invokeAsync("databricks:index/getNotificationDestinations:getNotificationDestinations", TypeShape.of(GetNotificationDestinationsResult.class), args, Utilities.withVersion(options)); + public static Output getPipelines() { + return getPipelines(GetPipelinesArgs.Empty, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _authentication is not configured for provider_ errors. @@ -10477,8 +12613,8 @@ public static CompletableFuture getNotificati * * databricks.Notebook to manage [Databricks Notebooks](https://docs.databricks.com/notebooks/index.html). 
* */ - public static Output getPipelines() { - return getPipelines(GetPipelinesArgs.Empty, InvokeOptions.Empty); + public static CompletableFuture getPipelinesPlain() { + return getPipelinesPlain(GetPipelinesPlainArgs.Empty, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _authentication is not configured for provider_ errors. @@ -10604,8 +12740,8 @@ public static Output getPipelines() { * * databricks.Notebook to manage [Databricks Notebooks](https://docs.databricks.com/notebooks/index.html). * */ - public static CompletableFuture getPipelinesPlain() { - return getPipelinesPlain(GetPipelinesPlainArgs.Empty, InvokeOptions.Empty); + public static Output getPipelines(GetPipelinesArgs args) { + return getPipelines(args, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _authentication is not configured for provider_ errors. @@ -10731,8 +12867,8 @@ public static CompletableFuture getPipelinesPlain() { * * databricks.Notebook to manage [Databricks Notebooks](https://docs.databricks.com/notebooks/index.html). * */ - public static Output getPipelines(GetPipelinesArgs args) { - return getPipelines(args, InvokeOptions.Empty); + public static CompletableFuture getPipelinesPlain(GetPipelinesPlainArgs args) { + return getPipelinesPlain(args, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _authentication is not configured for provider_ errors. 
@@ -10858,8 +12994,8 @@ public static Output getPipelines(GetPipelinesArgs args) { * * databricks.Notebook to manage [Databricks Notebooks](https://docs.databricks.com/notebooks/index.html). * */ - public static CompletableFuture getPipelinesPlain(GetPipelinesPlainArgs args) { - return getPipelinesPlain(args, InvokeOptions.Empty); + public static Output getPipelines(GetPipelinesArgs args, InvokeOptions options) { + return Deployment.getInstance().invoke("databricks:index/getPipelines:getPipelines", TypeShape.of(GetPipelinesResult.class), args, Utilities.withVersion(options)); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _authentication is not configured for provider_ errors. @@ -10985,7 +13121,7 @@ public static CompletableFuture getPipelinesPlain(GetPipelin * * databricks.Notebook to manage [Databricks Notebooks](https://docs.databricks.com/notebooks/index.html). * */ - public static Output getPipelines(GetPipelinesArgs args, InvokeOptions options) { + public static Output getPipelines(GetPipelinesArgs args, InvokeOutputOptions options) { return Deployment.getInstance().invoke("databricks:index/getPipelines:getPipelines", TypeShape.of(GetPipelinesResult.class), args, Utilities.withVersion(options)); } /** @@ -11272,12 +13408,179 @@ public static Output getRegisteredModel(GetRegisteredM return Deployment.getInstance().invoke("databricks:index/getRegisteredModel:getRegisteredModel", TypeShape.of(GetRegisteredModelResult.class), args, Utilities.withVersion(options)); } /** - * > This resource can only be used with a workspace-level provider! - * - * This resource allows you to get information about [Model in Unity Catalog](https://docs.databricks.com/en/mlflow/models-in-uc.html) in Databricks. + * > This resource can only be used with a workspace-level provider! 
+ * + * This resource allows you to get information about [Model in Unity Catalog](https://docs.databricks.com/en/mlflow/models-in-uc.html) in Databricks. + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetRegisteredModelArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var this_ = DatabricksFunctions.getRegisteredModel(GetRegisteredModelArgs.builder()
+     *             .fullName("main.default.my_model")
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are often used in the same context: + * + * * databricks.RegisteredModel resource to manage models within Unity Catalog. + * * databricks.ModelServing to serve this model on a Databricks serving endpoint. + * * databricks.MlflowExperiment to manage [MLflow experiments](https://docs.databricks.com/data/data-sources/mlflow-experiment.html) in Databricks. + * + */ + public static Output getRegisteredModel(GetRegisteredModelArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getRegisteredModel:getRegisteredModel", TypeShape.of(GetRegisteredModelResult.class), args, Utilities.withVersion(options)); + } + /** + * > This resource can only be used with a workspace-level provider! + * + * This resource allows you to get information about [Model in Unity Catalog](https://docs.databricks.com/en/mlflow/models-in-uc.html) in Databricks. + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetRegisteredModelArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var this_ = DatabricksFunctions.getRegisteredModel(GetRegisteredModelArgs.builder()
+     *             .fullName("main.default.my_model")
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are often used in the same context: + * + * * databricks.RegisteredModel resource to manage models within Unity Catalog. + * * databricks.ModelServing to serve this model on a Databricks serving endpoint. + * * databricks.MlflowExperiment to manage [MLflow experiments](https://docs.databricks.com/data/data-sources/mlflow-experiment.html) in Databricks. + * + */ + public static CompletableFuture getRegisteredModelPlain(GetRegisteredModelPlainArgs args, InvokeOptions options) { + return Deployment.getInstance().invokeAsync("databricks:index/getRegisteredModel:getRegisteredModel", TypeShape.of(GetRegisteredModelResult.class), args, Utilities.withVersion(options)); + } + /** + * Retrieves details about databricks.Schema that was created by Pulumi or manually. + * A schema can be identified by its two-level (fully qualified) name (in the form of: `catalog_name`.`schema_name`) as input. This can be retrieved programmatically using databricks.getSchemas data source. + * + * ## Example Usage + * + * * Retrieve details of all schemas in in a _sandbox_ databricks_catalog: + * + * <!--Start PulumiCodeChooser --> + * <!--End PulumiCodeChooser --> + * + * * Search for a specific schema by its fully qualified name: + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetSchemaArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var this_ = DatabricksFunctions.getSchema(GetSchemaArgs.builder()
+     *             .name("catalog.schema")
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.Schema to manage schemas within Unity Catalog. + * * databricks.Catalog to manage catalogs within Unity Catalog. + * + */ + public static Output getSchema(GetSchemaArgs args) { + return getSchema(args, InvokeOptions.Empty); + } + /** + * Retrieves details about databricks.Schema that was created by Pulumi or manually. + * A schema can be identified by its two-level (fully qualified) name (in the form of: `catalog_name`.`schema_name`) as input. This can be retrieved programmatically using databricks.getSchemas data source. * * ## Example Usage * + * * Retrieve details of all schemas in in a _sandbox_ databricks_catalog: + * + * <!--Start PulumiCodeChooser --> + * <!--End PulumiCodeChooser --> + * + * * Search for a specific schema by its fully qualified name: + * * <!--Start PulumiCodeChooser --> *
      * {@code
@@ -11287,7 +13590,7 @@ public static Output getRegisteredModel(GetRegisteredM
      * import com.pulumi.Pulumi;
      * import com.pulumi.core.Output;
      * import com.pulumi.databricks.DatabricksFunctions;
-     * import com.pulumi.databricks.inputs.GetRegisteredModelArgs;
+     * import com.pulumi.databricks.inputs.GetSchemaArgs;
      * import java.util.List;
      * import java.util.ArrayList;
      * import java.util.Map;
@@ -11301,8 +13604,8 @@ public static Output getRegisteredModel(GetRegisteredM
      *     }
      * 
      *     public static void stack(Context ctx) {
-     *         final var this = DatabricksFunctions.getRegisteredModel(GetRegisteredModelArgs.builder()
-     *             .fullName("main.default.my_model")
+     *         final var this_ = DatabricksFunctions.getSchema(GetSchemaArgs.builder()
+     *             .name("catalog.schema")
      *             .build());
      * 
      *     }
@@ -11313,15 +13616,14 @@ public static Output getRegisteredModel(GetRegisteredM
      * 
      * ## Related Resources
      * 
-     * The following resources are often used in the same context:
+     * The following resources are used in the same context:
      * 
-     * * databricks.RegisteredModel resource to manage models within Unity Catalog.
-     * * databricks.ModelServing to serve this model on a Databricks serving endpoint.
-     * * databricks.MlflowExperiment to manage [MLflow experiments](https://docs.databricks.com/data/data-sources/mlflow-experiment.html) in Databricks.
+     * * databricks.Schema to manage schemas within Unity Catalog.
+     * * databricks.Catalog to manage catalogs within Unity Catalog.
      * 
      */
-    public static CompletableFuture getRegisteredModelPlain(GetRegisteredModelPlainArgs args, InvokeOptions options) {
-        return Deployment.getInstance().invokeAsync("databricks:index/getRegisteredModel:getRegisteredModel", TypeShape.of(GetRegisteredModelResult.class), args, Utilities.withVersion(options));
+    public static CompletableFuture getSchemaPlain(GetSchemaPlainArgs args) {
+        return getSchemaPlain(args, InvokeOptions.Empty);
     }
     /**
      * Retrieves details about databricks.Schema that was created by Pulumi or manually.
@@ -11377,8 +13679,8 @@ public static CompletableFuture getRegisteredModelPlai
      * * databricks.Catalog to manage catalogs within Unity Catalog.
      * 
      */
-    public static Output getSchema(GetSchemaArgs args) {
-        return getSchema(args, InvokeOptions.Empty);
+    public static Output getSchema(GetSchemaArgs args, InvokeOptions options) {
+        return Deployment.getInstance().invoke("databricks:index/getSchema:getSchema", TypeShape.of(GetSchemaResult.class), args, Utilities.withVersion(options));
     }
     /**
      * Retrieves details about databricks.Schema that was created by Pulumi or manually.
@@ -11434,8 +13736,8 @@ public static Output getSchema(GetSchemaArgs args) {
      * * databricks.Catalog to manage catalogs within Unity Catalog.
      * 
      */
-    public static CompletableFuture getSchemaPlain(GetSchemaPlainArgs args) {
-        return getSchemaPlain(args, InvokeOptions.Empty);
+    public static Output getSchema(GetSchemaArgs args, InvokeOutputOptions options) {
+        return Deployment.getInstance().invoke("databricks:index/getSchema:getSchema", TypeShape.of(GetSchemaResult.class), args, Utilities.withVersion(options));
     }
     /**
      * Retrieves details about databricks.Schema that was created by Pulumi or manually.
@@ -11491,21 +13793,19 @@ public static CompletableFuture getSchemaPlain(GetSchemaPlainAr
      * * databricks.Catalog to manage catalogs within Unity Catalog.
      * 
      */
-    public static Output getSchema(GetSchemaArgs args, InvokeOptions options) {
-        return Deployment.getInstance().invoke("databricks:index/getSchema:getSchema", TypeShape.of(GetSchemaResult.class), args, Utilities.withVersion(options));
+    public static CompletableFuture getSchemaPlain(GetSchemaPlainArgs args, InvokeOptions options) {
+        return Deployment.getInstance().invokeAsync("databricks:index/getSchema:getSchema", TypeShape.of(GetSchemaResult.class), args, Utilities.withVersion(options));
     }
     /**
-     * Retrieves details about databricks.Schema that was created by Pulumi or manually.
-     * A schema can be identified by its two-level (fully qualified) name (in the form of: `catalog_name`.`schema_name`) as input. This can be retrieved programmatically using databricks.getSchemas data source.
+     * > **Note** This data source can only be used with a workspace-level provider!
      * 
-     * ## Example Usage
+     * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors.
      * 
-     * * Retrieve details of all schemas in in a _sandbox_ databricks_catalog:
+     * Retrieves a list of databricks.Schema ids, that were created by Pulumi or manually, so that special handling could be applied.
      * 
-     * <!--Start PulumiCodeChooser -->
-     * <!--End PulumiCodeChooser -->
+     * ## Example Usage
      * 
-     * * Search for a specific schema by its fully qualified name:
+     * Listing all schemas in a _sandbox_ databricks_catalog:
      * 
      * <!--Start PulumiCodeChooser -->
      * 
@@ -11516,7 +13816,7 @@ public static Output getSchema(GetSchemaArgs args, InvokeOption
      * import com.pulumi.Pulumi;
      * import com.pulumi.core.Output;
      * import com.pulumi.databricks.DatabricksFunctions;
-     * import com.pulumi.databricks.inputs.GetSchemaArgs;
+     * import com.pulumi.databricks.inputs.GetSchemasArgs;
      * import java.util.List;
      * import java.util.ArrayList;
      * import java.util.Map;
@@ -11530,10 +13830,11 @@ public static Output getSchema(GetSchemaArgs args, InvokeOption
      *     }
      * 
      *     public static void stack(Context ctx) {
-     *         final var this = DatabricksFunctions.getSchema(GetSchemaArgs.builder()
-     *             .name("catalog.schema")
+     *         final var sandbox = DatabricksFunctions.getSchemas(GetSchemasArgs.builder()
+     *             .catalogName("sandbox")
      *             .build());
      * 
+     *         ctx.export("allSandboxSchemas", sandbox.applyValue(getSchemasResult -> getSchemasResult));
      *     }
      * }
      * }
@@ -11548,8 +13849,8 @@ public static Output getSchema(GetSchemaArgs args, InvokeOption
      * * databricks.Catalog to manage catalogs within Unity Catalog.
      * 
      */
-    public static CompletableFuture getSchemaPlain(GetSchemaPlainArgs args, InvokeOptions options) {
-        return Deployment.getInstance().invokeAsync("databricks:index/getSchema:getSchema", TypeShape.of(GetSchemaResult.class), args, Utilities.withVersion(options));
+    public static Output getSchemas(GetSchemasArgs args) {
+        return getSchemas(args, InvokeOptions.Empty);
     }
     /**
      * > **Note** This data source can only be used with a workspace-level provider!
@@ -11604,8 +13905,8 @@ public static CompletableFuture getSchemaPlain(GetSchemaPlainAr
      * * databricks.Catalog to manage catalogs within Unity Catalog.
      * 
      */
-    public static Output getSchemas(GetSchemasArgs args) {
-        return getSchemas(args, InvokeOptions.Empty);
+    public static CompletableFuture getSchemasPlain(GetSchemasPlainArgs args) {
+        return getSchemasPlain(args, InvokeOptions.Empty);
     }
     /**
      * > **Note** This data source can only be used with a workspace-level provider!
@@ -11660,8 +13961,8 @@ public static Output getSchemas(GetSchemasArgs args) {
      * * databricks.Catalog to manage catalogs within Unity Catalog.
      * 
      */
-    public static CompletableFuture getSchemasPlain(GetSchemasPlainArgs args) {
-        return getSchemasPlain(args, InvokeOptions.Empty);
+    public static Output getSchemas(GetSchemasArgs args, InvokeOptions options) {
+        return Deployment.getInstance().invoke("databricks:index/getSchemas:getSchemas", TypeShape.of(GetSchemasResult.class), args, Utilities.withVersion(options));
     }
     /**
      * > **Note** This data source can only be used with a workspace-level provider!
@@ -11716,7 +14017,7 @@ public static CompletableFuture getSchemasPlain(GetSchemasPlai
      * * databricks.Catalog to manage catalogs within Unity Catalog.
      * 
      */
-    public static Output getSchemas(GetSchemasArgs args, InvokeOptions options) {
+    public static Output getSchemas(GetSchemasArgs args, InvokeOutputOptions options) {
         return Deployment.getInstance().invoke("databricks:index/getSchemas:getSchemas", TypeShape.of(GetSchemasResult.class), args, Utilities.withVersion(options));
     }
     /**
@@ -12130,6 +14431,77 @@ public static CompletableFuture getServicePrincipalPl
     public static Output getServicePrincipal(GetServicePrincipalArgs args, InvokeOptions options) {
         return Deployment.getInstance().invoke("databricks:index/getServicePrincipal:getServicePrincipal", TypeShape.of(GetServicePrincipalResult.class), args, Utilities.withVersion(options));
     }
+    /**
+     * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors.
+     * 
+     * Retrieves information about databricks_service_principal.
+     * 
+     * ## Example Usage
+     * 
+     * Adding service principal `11111111-2222-3333-4444-555666777888` to administrative group
+     * 
+     * <!--Start PulumiCodeChooser -->
+     * 
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetGroupArgs;
+     * import com.pulumi.databricks.inputs.GetServicePrincipalArgs;
+     * import com.pulumi.databricks.GroupMember;
+     * import com.pulumi.databricks.GroupMemberArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var admins = DatabricksFunctions.getGroup(GetGroupArgs.builder()
+     *             .displayName("admins")
+     *             .build());
+     * 
+     *         final var spn = DatabricksFunctions.getServicePrincipal(GetServicePrincipalArgs.builder()
+     *             .applicationId("11111111-2222-3333-4444-555666777888")
+     *             .build());
+     * 
+     *         var myMemberA = new GroupMember("myMemberA", GroupMemberArgs.builder()
+     *             .groupId(admins.applyValue(getGroupResult -> getGroupResult.id()))
+     *             .memberId(spn.applyValue(getServicePrincipalResult -> getServicePrincipalResult.id()))
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * - End to end workspace management guide. + * - databricks.getCurrentUser data to retrieve information about databricks.User or databricks_service_principal, that is calling Databricks REST API. + * - databricks.Group to manage [groups in Databricks Workspace](https://docs.databricks.com/administration-guide/users-groups/groups.html) or [Account Console](https://accounts.cloud.databricks.com/) (for AWS deployments). + * - databricks.Group data to retrieve information about databricks.Group members, entitlements and instance profiles. + * - databricks.GroupInstanceProfile to attach databricks.InstanceProfile (AWS) to databricks_group. + * - databricks.GroupMember to attach users and groups as group members. + * - databricks.Permissions to manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspace. + * - databricks_service principal to manage service principals + * + */ + public static Output getServicePrincipal(GetServicePrincipalArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getServicePrincipal:getServicePrincipal", TypeShape.of(GetServicePrincipalResult.class), args, Utilities.withVersion(options)); + } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. 
* @@ -12252,8 +14624,123 @@ public static Output getServicePrincipals(GetService * Retrieves `application_ids` of all databricks.ServicePrincipal based on their `display_name` * */ - public static CompletableFuture getServicePrincipalsPlain(GetServicePrincipalsPlainArgs args, InvokeOptions options) { - return Deployment.getInstance().invokeAsync("databricks:index/getServicePrincipals:getServicePrincipals", TypeShape.of(GetServicePrincipalsResult.class), args, Utilities.withVersion(options)); + public static Output getServicePrincipals(GetServicePrincipalsArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getServicePrincipals:getServicePrincipals", TypeShape.of(GetServicePrincipalsResult.class), args, Utilities.withVersion(options)); + } + /** + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + * + * Retrieves `application_ids` of all databricks.ServicePrincipal based on their `display_name` + * + */ + public static CompletableFuture getServicePrincipalsPlain(GetServicePrincipalsPlainArgs args, InvokeOptions options) { + return Deployment.getInstance().invokeAsync("databricks:index/getServicePrincipals:getServicePrincipals", TypeShape.of(GetServicePrincipalsResult.class), args, Utilities.withVersion(options)); + } + /** + * Retrieves details about a databricks.Share that were created by Pulumi or manually. + * + * ## Example Usage + * + * Getting details of an existing share in the metastore + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetShareArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var this_ = DatabricksFunctions.getShare(GetShareArgs.builder()
+     *             .name("this")
+     *             .build());
+     * 
+     *         ctx.export("createdBy", this_.createdBy());
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.Share to create Delta Sharing shares. + * * databricks.Recipient to create Delta Sharing recipients. + * * databricks.Grants to manage Delta Sharing permissions. + * + */ + public static Output getShare() { + return getShare(GetShareArgs.Empty, InvokeOptions.Empty); + } + /** + * Retrieves details about a databricks.Share that were created by Pulumi or manually. + * + * ## Example Usage + * + * Getting details of an existing share in the metastore + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetShareArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var this_ = DatabricksFunctions.getShare(GetShareArgs.builder()
+     *             .name("this")
+     *             .build());
+     * 
+     *         ctx.export("createdBy", this_.createdBy());
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.Share to create Delta Sharing shares. + * * databricks.Recipient to create Delta Sharing recipients. + * * databricks.Grants to manage Delta Sharing permissions. + * + */ + public static CompletableFuture getSharePlain() { + return getSharePlain(GetSharePlainArgs.Empty, InvokeOptions.Empty); } /** * Retrieves details about a databricks.Share that were created by Pulumi or manually. @@ -12305,8 +14792,8 @@ public static CompletableFuture getServicePrincipals * * databricks.Grants to manage Delta Sharing permissions. * */ - public static Output getShare() { - return getShare(GetShareArgs.Empty, InvokeOptions.Empty); + public static Output getShare(GetShareArgs args) { + return getShare(args, InvokeOptions.Empty); } /** * Retrieves details about a databricks.Share that were created by Pulumi or manually. @@ -12358,8 +14845,8 @@ public static Output getShare() { * * databricks.Grants to manage Delta Sharing permissions. * */ - public static CompletableFuture getSharePlain() { - return getSharePlain(GetSharePlainArgs.Empty, InvokeOptions.Empty); + public static CompletableFuture getSharePlain(GetSharePlainArgs args) { + return getSharePlain(args, InvokeOptions.Empty); } /** * Retrieves details about a databricks.Share that were created by Pulumi or manually. @@ -12411,8 +14898,8 @@ public static CompletableFuture getSharePlain() { * * databricks.Grants to manage Delta Sharing permissions. * */ - public static Output getShare(GetShareArgs args) { - return getShare(args, InvokeOptions.Empty); + public static Output getShare(GetShareArgs args, InvokeOptions options) { + return Deployment.getInstance().invoke("databricks:index/getShare:getShare", TypeShape.of(GetShareResult.class), args, Utilities.withVersion(options)); } /** * Retrieves details about a databricks.Share that were created by Pulumi or manually. 
@@ -12464,8 +14951,8 @@ public static Output getShare(GetShareArgs args) { * * databricks.Grants to manage Delta Sharing permissions. * */ - public static CompletableFuture getSharePlain(GetSharePlainArgs args) { - return getSharePlain(args, InvokeOptions.Empty); + public static Output getShare(GetShareArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getShare:getShare", TypeShape.of(GetShareResult.class), args, Utilities.withVersion(options)); } /** * Retrieves details about a databricks.Share that were created by Pulumi or manually. @@ -12517,15 +15004,15 @@ public static CompletableFuture getSharePlain(GetSharePlainArgs * * databricks.Grants to manage Delta Sharing permissions. * */ - public static Output getShare(GetShareArgs args, InvokeOptions options) { - return Deployment.getInstance().invoke("databricks:index/getShare:getShare", TypeShape.of(GetShareResult.class), args, Utilities.withVersion(options)); + public static CompletableFuture getSharePlain(GetSharePlainArgs args, InvokeOptions options) { + return Deployment.getInstance().invokeAsync("databricks:index/getShare:getShare", TypeShape.of(GetShareResult.class), args, Utilities.withVersion(options)); } /** - * Retrieves details about a databricks.Share that were created by Pulumi or manually. + * Retrieves a list of databricks.Share name, that were created by Pulumi or manually. * * ## Example Usage * - * Getting details of an existing share in the metastore + * Getting all existing shares in the metastore * * <!--Start PulumiCodeChooser --> *
@@ -12536,7 +15023,7 @@ public static Output getShare(GetShareArgs args, InvokeOptions o
      * import com.pulumi.Pulumi;
      * import com.pulumi.core.Output;
      * import com.pulumi.databricks.DatabricksFunctions;
-     * import com.pulumi.databricks.inputs.GetShareArgs;
+     * import com.pulumi.databricks.inputs.GetSharesArgs;
      * import java.util.List;
      * import java.util.ArrayList;
      * import java.util.Map;
@@ -12550,11 +15037,9 @@ public static Output getShare(GetShareArgs args, InvokeOptions o
      *     }
      * 
      *     public static void stack(Context ctx) {
-     *         final var this = DatabricksFunctions.getShare(GetShareArgs.builder()
-     *             .name("this")
-     *             .build());
+     *         final var this = DatabricksFunctions.getShares();
      * 
-     *         ctx.export("createdBy", this_.createdBy());
+     *         ctx.export("shareName", this_.shares());
      *     }
      * }
      * }
@@ -12570,8 +15055,8 @@ public static Output getShare(GetShareArgs args, InvokeOptions o
      * * databricks.Grants to manage Delta Sharing permissions.
      * 
      */
-    public static CompletableFuture getSharePlain(GetSharePlainArgs args, InvokeOptions options) {
-        return Deployment.getInstance().invokeAsync("databricks:index/getShare:getShare", TypeShape.of(GetShareResult.class), args, Utilities.withVersion(options));
+    public static Output getShares() {
+        return getShares(GetSharesArgs.Empty, InvokeOptions.Empty);
     }
     /**
      * Retrieves a list of databricks.Share name, that were created by Pulumi or manually.
@@ -12621,8 +15106,8 @@ public static CompletableFuture getSharePlain(GetSharePlainArgs
      * * databricks.Grants to manage Delta Sharing permissions.
      * 
      */
-    public static Output getShares() {
-        return getShares(GetSharesArgs.Empty, InvokeOptions.Empty);
+    public static CompletableFuture getSharesPlain() {
+        return getSharesPlain(GetSharesPlainArgs.Empty, InvokeOptions.Empty);
     }
     /**
      * Retrieves a list of databricks.Share name, that were created by Pulumi or manually.
@@ -12672,8 +15157,8 @@ public static Output getShares() {
      * * databricks.Grants to manage Delta Sharing permissions.
      * 
      */
-    public static CompletableFuture getSharesPlain() {
-        return getSharesPlain(GetSharesPlainArgs.Empty, InvokeOptions.Empty);
+    public static Output getShares(GetSharesArgs args) {
+        return getShares(args, InvokeOptions.Empty);
     }
     /**
      * Retrieves a list of databricks.Share name, that were created by Pulumi or manually.
@@ -12723,8 +15208,8 @@ public static CompletableFuture getSharesPlain() {
      * * databricks.Grants to manage Delta Sharing permissions.
      * 
      */
-    public static Output getShares(GetSharesArgs args) {
-        return getShares(args, InvokeOptions.Empty);
+    public static CompletableFuture getSharesPlain(GetSharesPlainArgs args) {
+        return getSharesPlain(args, InvokeOptions.Empty);
     }
     /**
      * Retrieves a list of databricks.Share name, that were created by Pulumi or manually.
@@ -12774,8 +15259,8 @@ public static Output getShares(GetSharesArgs args) {
      * * databricks.Grants to manage Delta Sharing permissions.
      * 
      */
-    public static CompletableFuture getSharesPlain(GetSharesPlainArgs args) {
-        return getSharesPlain(args, InvokeOptions.Empty);
+    public static Output getShares(GetSharesArgs args, InvokeOptions options) {
+        return Deployment.getInstance().invoke("databricks:index/getShares:getShares", TypeShape.of(GetSharesResult.class), args, Utilities.withVersion(options));
     }
     /**
      * Retrieves a list of databricks.Share name, that were created by Pulumi or manually.
@@ -12825,7 +15310,7 @@ public static CompletableFuture getSharesPlain(GetSharesPlainAr
      * * databricks.Grants to manage Delta Sharing permissions.
      * 
      */
-    public static Output getShares(GetSharesArgs args, InvokeOptions options) {
+    public static Output getShares(GetSharesArgs args, InvokeOutputOptions options) {
         return Deployment.getInstance().invoke("databricks:index/getShares:getShares", TypeShape.of(GetSharesResult.class), args, Utilities.withVersion(options));
     }
     /**
@@ -13341,17 +15826,218 @@ public static Output getSparkVersion(GetSparkVersionArgs
      * 
      * ## Related Resources
      * 
-     * The following resources are used in the same context:
+     * The following resources are used in the same context:
+     * 
+     * * End to end workspace management guide.
+     * * databricks.Cluster to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html).
+     * * databricks.ClusterPolicy to create a databricks.Cluster policy, which limits the ability to create clusters based on a set of rules.
+     * * databricks.InstancePool to manage [instance pools](https://docs.databricks.com/clusters/instance-pools/index.html) to reduce cluster start and auto-scaling times by maintaining a set of idle, ready-to-use instances.
+     * * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster.
+     * 
+     */
+    public static Output getSparkVersion(GetSparkVersionArgs args, InvokeOutputOptions options) {
+        return Deployment.getInstance().invoke("databricks:index/getSparkVersion:getSparkVersion", TypeShape.of(GetSparkVersionResult.class), args, Utilities.withVersion(options));
+    }
+    /**
+     * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors.
+     * 
+     * Gets [Databricks Runtime (DBR)](https://docs.databricks.com/runtime/dbr.html) version that could be used for `spark_version` parameter in databricks.Cluster and other resources that fits search criteria, like specific Spark or Scala version, ML or Genomics runtime, etc., similar to executing `databricks clusters spark-versions`, and filters it to return the latest version that matches criteria. Often used along databricks.getNodeType data source.
+     * 
+     * > **Note** This is experimental functionality, which aims to simplify things. In case of wrong parameters given (e.g. together `ml = true` and `genomics = true`, or something like), data source will throw an error.  Similarly, if search returns multiple results, and `latest = false`, data source will throw an error.
+     * 
+     * ## Example Usage
+     * 
+     * <!--Start PulumiCodeChooser -->
+     * 
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetNodeTypeArgs;
+     * import com.pulumi.databricks.inputs.GetSparkVersionArgs;
+     * import com.pulumi.databricks.Cluster;
+     * import com.pulumi.databricks.ClusterArgs;
+     * import com.pulumi.databricks.inputs.ClusterAutoscaleArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var withGpu = DatabricksFunctions.getNodeType(GetNodeTypeArgs.builder()
+     *             .localDisk(true)
+     *             .minCores(16)
+     *             .gbPerCore(1)
+     *             .minGpus(1)
+     *             .build());
+     * 
+     *         final var gpuMl = DatabricksFunctions.getSparkVersion(GetSparkVersionArgs.builder()
+     *             .gpu(true)
+     *             .ml(true)
+     *             .build());
+     * 
+     *         var research = new Cluster("research", ClusterArgs.builder()
+     *             .clusterName("Research Cluster")
+     *             .sparkVersion(gpuMl.applyValue(getSparkVersionResult -> getSparkVersionResult.id()))
+     *             .nodeTypeId(withGpu.applyValue(getNodeTypeResult -> getNodeTypeResult.id()))
+     *             .autoterminationMinutes(20)
+     *             .autoscale(ClusterAutoscaleArgs.builder()
+     *                 .minWorkers(1)
+     *                 .maxWorkers(50)
+     *                 .build())
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * End to end workspace management guide. + * * databricks.Cluster to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html). + * * databricks.ClusterPolicy to create a databricks.Cluster policy, which limits the ability to create clusters based on a set of rules. + * * databricks.InstancePool to manage [instance pools](https://docs.databricks.com/clusters/instance-pools/index.html) to reduce cluster start and auto-scaling times by maintaining a set of idle, ready-to-use instances. + * * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster. + * + */ + public static CompletableFuture getSparkVersionPlain(GetSparkVersionPlainArgs args, InvokeOptions options) { + return Deployment.getInstance().invokeAsync("databricks:index/getSparkVersion:getSparkVersion", TypeShape.of(GetSparkVersionResult.class), args, Utilities.withVersion(options)); + } + /** + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + * + * Retrieves information about a databricks.getSqlWarehouse using its id. This could be retrieved programmatically using databricks.getSqlWarehouses data source. + * + * ## Example Usage + * + * * Retrieve attributes of each SQL warehouses in a workspace: + * + * <!--Start PulumiCodeChooser --> + * <!--End PulumiCodeChooser --> + * + * * Search for a specific SQL Warehouse by name: + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetSqlWarehouseArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var all = DatabricksFunctions.getSqlWarehouse(GetSqlWarehouseArgs.builder()
+     *             .name("Starter Warehouse")
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related resources + * + * The following resources are often used in the same context: + * + * * End to end workspace management guide. + * * databricks.InstanceProfile to manage AWS EC2 instance profiles that users can launch databricks.Cluster and access data, like databricks_mount. + * * databricks.SqlDashboard to manage Databricks SQL [Dashboards](https://docs.databricks.com/sql/user/dashboards/index.html). + * * databricks.SqlGlobalConfig to configure the security policy, databricks_instance_profile, and [data access properties](https://docs.databricks.com/sql/admin/data-access-configuration.html) for all databricks.getSqlWarehouse of workspace. + * * databricks.SqlPermissions to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and [more](https://docs.databricks.com/security/access-control/table-acls/object-privileges.html). + * + */ + public static Output getSqlWarehouse() { + return getSqlWarehouse(GetSqlWarehouseArgs.Empty, InvokeOptions.Empty); + } + /** + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + * + * Retrieves information about a databricks.getSqlWarehouse using its id. This could be retrieved programmatically using databricks.getSqlWarehouses data source. + * + * ## Example Usage + * + * * Retrieve attributes of each SQL warehouses in a workspace: + * + * <!--Start PulumiCodeChooser --> + * <!--End PulumiCodeChooser --> + * + * * Search for a specific SQL Warehouse by name: + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetSqlWarehouseArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var all = DatabricksFunctions.getSqlWarehouse(GetSqlWarehouseArgs.builder()
+     *             .name("Starter Warehouse")
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related resources + * + * The following resources are often used in the same context: * * * End to end workspace management guide. - * * databricks.Cluster to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html). - * * databricks.ClusterPolicy to create a databricks.Cluster policy, which limits the ability to create clusters based on a set of rules. - * * databricks.InstancePool to manage [instance pools](https://docs.databricks.com/clusters/instance-pools/index.html) to reduce cluster start and auto-scaling times by maintaining a set of idle, ready-to-use instances. - * * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster. + * * databricks.InstanceProfile to manage AWS EC2 instance profiles that users can launch databricks.Cluster and access data, like databricks_mount. + * * databricks.SqlDashboard to manage Databricks SQL [Dashboards](https://docs.databricks.com/sql/user/dashboards/index.html). + * * databricks.SqlGlobalConfig to configure the security policy, databricks_instance_profile, and [data access properties](https://docs.databricks.com/sql/admin/data-access-configuration.html) for all databricks.getSqlWarehouse of workspace. + * * databricks.SqlPermissions to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and [more](https://docs.databricks.com/security/access-control/table-acls/object-privileges.html). 
* */ - public static CompletableFuture getSparkVersionPlain(GetSparkVersionPlainArgs args, InvokeOptions options) { - return Deployment.getInstance().invokeAsync("databricks:index/getSparkVersion:getSparkVersion", TypeShape.of(GetSparkVersionResult.class), args, Utilities.withVersion(options)); + public static CompletableFuture getSqlWarehousePlain() { + return getSqlWarehousePlain(GetSqlWarehousePlainArgs.Empty, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -13411,8 +16097,8 @@ public static CompletableFuture getSparkVersionPlain(GetS * * databricks.SqlPermissions to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and [more](https://docs.databricks.com/security/access-control/table-acls/object-privileges.html). * */ - public static Output getSqlWarehouse() { - return getSqlWarehouse(GetSqlWarehouseArgs.Empty, InvokeOptions.Empty); + public static Output getSqlWarehouse(GetSqlWarehouseArgs args) { + return getSqlWarehouse(args, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -13472,8 +16158,8 @@ public static Output getSqlWarehouse() { * * databricks.SqlPermissions to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and [more](https://docs.databricks.com/security/access-control/table-acls/object-privileges.html). 
* */ - public static CompletableFuture getSqlWarehousePlain() { - return getSqlWarehousePlain(GetSqlWarehousePlainArgs.Empty, InvokeOptions.Empty); + public static CompletableFuture getSqlWarehousePlain(GetSqlWarehousePlainArgs args) { + return getSqlWarehousePlain(args, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -13533,8 +16219,8 @@ public static CompletableFuture getSqlWarehousePlain() { * * databricks.SqlPermissions to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and [more](https://docs.databricks.com/security/access-control/table-acls/object-privileges.html). * */ - public static Output getSqlWarehouse(GetSqlWarehouseArgs args) { - return getSqlWarehouse(args, InvokeOptions.Empty); + public static Output getSqlWarehouse(GetSqlWarehouseArgs args, InvokeOptions options) { + return Deployment.getInstance().invoke("databricks:index/getSqlWarehouse:getSqlWarehouse", TypeShape.of(GetSqlWarehouseResult.class), args, Utilities.withVersion(options)); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -13594,8 +16280,8 @@ public static Output getSqlWarehouse(GetSqlWarehouseArgs * * databricks.SqlPermissions to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and [more](https://docs.databricks.com/security/access-control/table-acls/object-privileges.html). 
* */ - public static CompletableFuture getSqlWarehousePlain(GetSqlWarehousePlainArgs args) { - return getSqlWarehousePlain(args, InvokeOptions.Empty); + public static Output getSqlWarehouse(GetSqlWarehouseArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getSqlWarehouse:getSqlWarehouse", TypeShape.of(GetSqlWarehouseResult.class), args, Utilities.withVersion(options)); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -13655,22 +16341,50 @@ public static CompletableFuture getSqlWarehousePlain(GetS * * databricks.SqlPermissions to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and [more](https://docs.databricks.com/security/access-control/table-acls/object-privileges.html). * */ - public static Output getSqlWarehouse(GetSqlWarehouseArgs args, InvokeOptions options) { - return Deployment.getInstance().invoke("databricks:index/getSqlWarehouse:getSqlWarehouse", TypeShape.of(GetSqlWarehouseResult.class), args, Utilities.withVersion(options)); + public static CompletableFuture getSqlWarehousePlain(GetSqlWarehousePlainArgs args, InvokeOptions options) { + return Deployment.getInstance().invokeAsync("databricks:index/getSqlWarehouse:getSqlWarehouse", TypeShape.of(GetSqlWarehouseResult.class), args, Utilities.withVersion(options)); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. * - * Retrieves information about a databricks.getSqlWarehouse using its id. 
This could be retrieved programmatically using databricks.getSqlWarehouses data source. + * Retrieves a list of databricks.SqlEndpoint ids, that were created by Pulumi or manually. * * ## Example Usage * - * * Retrieve attributes of each SQL warehouses in a workspace: + * Retrieve IDs for all SQL warehouses: * * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetSqlWarehousesArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var all = DatabricksFunctions.getSqlWarehouses();
+     * 
+     *     }
+     * }
+     * }
+     * 
* <!--End PulumiCodeChooser --> * - * * Search for a specific SQL Warehouse by name: + * Retrieve IDs for all clusters having "Shared" in the warehouse name: * * <!--Start PulumiCodeChooser --> *
@@ -13681,7 +16395,7 @@ public static Output getSqlWarehouse(GetSqlWarehouseArgs
      * import com.pulumi.Pulumi;
      * import com.pulumi.core.Output;
      * import com.pulumi.databricks.DatabricksFunctions;
-     * import com.pulumi.databricks.inputs.GetSqlWarehouseArgs;
+     * import com.pulumi.databricks.inputs.GetSqlWarehousesArgs;
      * import java.util.List;
      * import java.util.ArrayList;
      * import java.util.Map;
@@ -13695,8 +16409,8 @@ public static Output getSqlWarehouse(GetSqlWarehouseArgs
      *     }
      * 
      *     public static void stack(Context ctx) {
-     *         final var all = DatabricksFunctions.getSqlWarehouse(GetSqlWarehouseArgs.builder()
-     *             .name("Starter Warehouse")
+     *         final var allShared = DatabricksFunctions.getSqlWarehouses(GetSqlWarehousesArgs.builder()
+     *             .warehouseNameContains("shared")
      *             .build());
      * 
      *     }
@@ -13705,7 +16419,7 @@ public static Output getSqlWarehouse(GetSqlWarehouseArgs
      * 
* <!--End PulumiCodeChooser --> * - * ## Related resources + * ## Related Resources * * The following resources are often used in the same context: * @@ -13716,8 +16430,8 @@ public static Output getSqlWarehouse(GetSqlWarehouseArgs * * databricks.SqlPermissions to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and [more](https://docs.databricks.com/security/access-control/table-acls/object-privileges.html). * */ - public static CompletableFuture getSqlWarehousePlain(GetSqlWarehousePlainArgs args, InvokeOptions options) { - return Deployment.getInstance().invokeAsync("databricks:index/getSqlWarehouse:getSqlWarehouse", TypeShape.of(GetSqlWarehouseResult.class), args, Utilities.withVersion(options)); + public static Output getSqlWarehouses() { + return getSqlWarehouses(GetSqlWarehousesArgs.Empty, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -13805,8 +16519,8 @@ public static CompletableFuture getSqlWarehousePlain(GetS * * databricks.SqlPermissions to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and [more](https://docs.databricks.com/security/access-control/table-acls/object-privileges.html). 
* */ - public static Output getSqlWarehouses() { - return getSqlWarehouses(GetSqlWarehousesArgs.Empty, InvokeOptions.Empty); + public static CompletableFuture getSqlWarehousesPlain() { + return getSqlWarehousesPlain(GetSqlWarehousesPlainArgs.Empty, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -13894,8 +16608,8 @@ public static Output getSqlWarehouses() { * * databricks.SqlPermissions to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and [more](https://docs.databricks.com/security/access-control/table-acls/object-privileges.html). * */ - public static CompletableFuture getSqlWarehousesPlain() { - return getSqlWarehousesPlain(GetSqlWarehousesPlainArgs.Empty, InvokeOptions.Empty); + public static Output getSqlWarehouses(GetSqlWarehousesArgs args) { + return getSqlWarehouses(args, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -13983,8 +16697,8 @@ public static CompletableFuture getSqlWarehousesPlain() * * databricks.SqlPermissions to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and [more](https://docs.databricks.com/security/access-control/table-acls/object-privileges.html). 
* */ - public static Output getSqlWarehouses(GetSqlWarehousesArgs args) { - return getSqlWarehouses(args, InvokeOptions.Empty); + public static CompletableFuture getSqlWarehousesPlain(GetSqlWarehousesPlainArgs args) { + return getSqlWarehousesPlain(args, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -14072,8 +16786,8 @@ public static Output getSqlWarehouses(GetSqlWarehousesAr * * databricks.SqlPermissions to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and [more](https://docs.databricks.com/security/access-control/table-acls/object-privileges.html). * */ - public static CompletableFuture getSqlWarehousesPlain(GetSqlWarehousesPlainArgs args) { - return getSqlWarehousesPlain(args, InvokeOptions.Empty); + public static Output getSqlWarehouses(GetSqlWarehousesArgs args, InvokeOptions options) { + return Deployment.getInstance().invoke("databricks:index/getSqlWarehouses:getSqlWarehouses", TypeShape.of(GetSqlWarehousesResult.class), args, Utilities.withVersion(options)); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -14161,7 +16875,7 @@ public static CompletableFuture getSqlWarehousesPlain(Ge * * databricks.SqlPermissions to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and [more](https://docs.databricks.com/security/access-control/table-acls/object-privileges.html). 
* */ - public static Output getSqlWarehouses(GetSqlWarehousesArgs args, InvokeOptions options) { + public static Output getSqlWarehouses(GetSqlWarehousesArgs args, InvokeOutputOptions options) { return Deployment.getInstance().invoke("databricks:index/getSqlWarehouses:getSqlWarehouses", TypeShape.of(GetSqlWarehousesResult.class), args, Utilities.withVersion(options)); } /** @@ -14304,8 +17018,170 @@ public static CompletableFuture getSqlWarehousesPlain(Ge * * databricks.StorageCredential to manage Storage Credentials within Unity Catalog. * */ - public static Output getStorageCredential(GetStorageCredentialArgs args) { - return getStorageCredential(args, InvokeOptions.Empty); + public static Output getStorageCredential(GetStorageCredentialArgs args) { + return getStorageCredential(args, InvokeOptions.Empty); + } + /** + * > **Note** This data source can only be used with a workspace-level provider! + * + * Retrieves details about a databricks.StorageCredential that were created by Pulumi or manually. + * + * ## Example Usage + * + * Getting details of an existing storage credential in the metastore + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetStorageCredentialArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var this_ = DatabricksFunctions.getStorageCredential(GetStorageCredentialArgs.builder()
+     *             .name("this")
+     *             .build());
+     * 
+     *         ctx.export("createdBy", this_.storageCredentialInfo().createdBy());
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.getStorageCredentials to get names of all credentials + * * databricks.StorageCredential to manage Storage Credentials within Unity Catalog. + * + */ + public static CompletableFuture getStorageCredentialPlain(GetStorageCredentialPlainArgs args) { + return getStorageCredentialPlain(args, InvokeOptions.Empty); + } + /** + * > **Note** This data source can only be used with a workspace-level provider! + * + * Retrieves details about a databricks.StorageCredential that were created by Pulumi or manually. + * + * ## Example Usage + * + * Getting details of an existing storage credential in the metastore + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetStorageCredentialArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var this_ = DatabricksFunctions.getStorageCredential(GetStorageCredentialArgs.builder()
+     *             .name("this")
+     *             .build());
+     * 
+     *         ctx.export("createdBy", this_.storageCredentialInfo().createdBy());
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.getStorageCredentials to get names of all credentials + * * databricks.StorageCredential to manage Storage Credentials within Unity Catalog. + * + */ + public static Output getStorageCredential(GetStorageCredentialArgs args, InvokeOptions options) { + return Deployment.getInstance().invoke("databricks:index/getStorageCredential:getStorageCredential", TypeShape.of(GetStorageCredentialResult.class), args, Utilities.withVersion(options)); + } + /** + * > **Note** This data source can only be used with a workspace-level provider! + * + * Retrieves details about a databricks.StorageCredential that were created by Pulumi or manually. + * + * ## Example Usage + * + * Getting details of an existing storage credential in the metastore + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetStorageCredentialArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var this_ = DatabricksFunctions.getStorageCredential(GetStorageCredentialArgs.builder()
+     *             .name("this")
+     *             .build());
+     * 
+     *         ctx.export("createdBy", this_.storageCredentialInfo().createdBy());
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.getStorageCredentials to get names of all credentials + * * databricks.StorageCredential to manage Storage Credentials within Unity Catalog. + * + */ + public static Output getStorageCredential(GetStorageCredentialArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getStorageCredential:getStorageCredential", TypeShape.of(GetStorageCredentialResult.class), args, Utilities.withVersion(options)); } /** * > **Note** This data source can only be used with a workspace-level provider! @@ -14358,17 +17234,17 @@ public static Output getStorageCredential(GetStorage * * databricks.StorageCredential to manage Storage Credentials within Unity Catalog. * */ - public static CompletableFuture getStorageCredentialPlain(GetStorageCredentialPlainArgs args) { - return getStorageCredentialPlain(args, InvokeOptions.Empty); + public static CompletableFuture getStorageCredentialPlain(GetStorageCredentialPlainArgs args, InvokeOptions options) { + return Deployment.getInstance().invokeAsync("databricks:index/getStorageCredential:getStorageCredential", TypeShape.of(GetStorageCredentialResult.class), args, Utilities.withVersion(options)); } /** * > **Note** This data source can only be used with a workspace-level provider! * - * Retrieves details about a databricks.StorageCredential that were created by Pulumi or manually. + * Retrieves a list of databricks.StorageCredential objects, that were created by Pulumi or manually, so that special handling could be applied. * * ## Example Usage * - * Getting details of an existing storage credential in the metastore + * List all storage credentials in the metastore * * <!--Start PulumiCodeChooser --> *
@@ -14379,7 +17255,7 @@ public static CompletableFuture getStorageCredential
      * import com.pulumi.Pulumi;
      * import com.pulumi.core.Output;
      * import com.pulumi.databricks.DatabricksFunctions;
-     * import com.pulumi.databricks.inputs.GetStorageCredentialArgs;
+     * import com.pulumi.databricks.inputs.GetStorageCredentialsArgs;
      * import java.util.List;
      * import java.util.ArrayList;
      * import java.util.Map;
@@ -14393,11 +17269,9 @@ public static CompletableFuture getStorageCredential
      *     }
      * 
      *     public static void stack(Context ctx) {
-     *         final var this = DatabricksFunctions.getStorageCredential(GetStorageCredentialArgs.builder()
-     *             .name("this")
-     *             .build());
+     *         final var all = DatabricksFunctions.getStorageCredentials();
      * 
-     *         ctx.export("createdBy", this_.storageCredentialInfo().createdBy());
+     *         ctx.export("allStorageCredentials", all.applyValue(getStorageCredentialsResult -> getStorageCredentialsResult.names()));
      *     }
      * }
      * }
@@ -14408,21 +17282,21 @@ public static CompletableFuture getStorageCredential
      * 
      * The following resources are used in the same context:
      * 
-     * * databricks.getStorageCredentials to get names of all credentials
+     * * databricks.getStorageCredential to get information about a single credential
      * * databricks.StorageCredential to manage Storage Credentials within Unity Catalog.
      * 
      */
-    public static Output getStorageCredential(GetStorageCredentialArgs args, InvokeOptions options) {
-        return Deployment.getInstance().invoke("databricks:index/getStorageCredential:getStorageCredential", TypeShape.of(GetStorageCredentialResult.class), args, Utilities.withVersion(options));
+    public static Output getStorageCredentials() {
+        return getStorageCredentials(GetStorageCredentialsArgs.Empty, InvokeOptions.Empty);
     }
     /**
      * > **Note** This data source can only be used with a workspace-level provider!
      * 
-     * Retrieves details about a databricks.StorageCredential that were created by Pulumi or manually.
+     * Retrieves a list of databricks.StorageCredential objects, that were created by Pulumi or manually, so that special handling could be applied.
      * 
      * ## Example Usage
      * 
-     * Getting details of an existing storage credential in the metastore
+     * List all storage credentials in the metastore
      * 
      * <!--Start PulumiCodeChooser -->
      * 
@@ -14433,7 +17307,7 @@ public static Output getStorageCredential(GetStorage
      * import com.pulumi.Pulumi;
      * import com.pulumi.core.Output;
      * import com.pulumi.databricks.DatabricksFunctions;
-     * import com.pulumi.databricks.inputs.GetStorageCredentialArgs;
+     * import com.pulumi.databricks.inputs.GetStorageCredentialsArgs;
      * import java.util.List;
      * import java.util.ArrayList;
      * import java.util.Map;
@@ -14447,11 +17321,9 @@ public static Output getStorageCredential(GetStorage
      *     }
      * 
      *     public static void stack(Context ctx) {
-     *         final var this = DatabricksFunctions.getStorageCredential(GetStorageCredentialArgs.builder()
-     *             .name("this")
-     *             .build());
+     *         final var all = DatabricksFunctions.getStorageCredentials();
      * 
-     *         ctx.export("createdBy", this_.storageCredentialInfo().createdBy());
+     *         ctx.export("allStorageCredentials", all.applyValue(getStorageCredentialsResult -> getStorageCredentialsResult.names()));
      *     }
      * }
      * }
@@ -14462,12 +17334,12 @@ public static Output getStorageCredential(GetStorage
      * 
      * The following resources are used in the same context:
      * 
-     * * databricks.getStorageCredentials to get names of all credentials
+     * * databricks.getStorageCredential to get information about a single credential
      * * databricks.StorageCredential to manage Storage Credentials within Unity Catalog.
      * 
      */
-    public static CompletableFuture getStorageCredentialPlain(GetStorageCredentialPlainArgs args, InvokeOptions options) {
-        return Deployment.getInstance().invokeAsync("databricks:index/getStorageCredential:getStorageCredential", TypeShape.of(GetStorageCredentialResult.class), args, Utilities.withVersion(options));
+    public static CompletableFuture getStorageCredentialsPlain() {
+        return getStorageCredentialsPlain(GetStorageCredentialsPlainArgs.Empty, InvokeOptions.Empty);
     }
     /**
      * > **Note** This data source can only be used with a workspace-level provider!
@@ -14518,8 +17390,8 @@ public static CompletableFuture getStorageCredential
      * * databricks.StorageCredential to manage Storage Credentials within Unity Catalog.
      * 
      */
-    public static Output getStorageCredentials() {
-        return getStorageCredentials(GetStorageCredentialsArgs.Empty, InvokeOptions.Empty);
+    public static Output getStorageCredentials(GetStorageCredentialsArgs args) {
+        return getStorageCredentials(args, InvokeOptions.Empty);
     }
     /**
      * > **Note** This data source can only be used with a workspace-level provider!
@@ -14570,8 +17442,8 @@ public static Output getStorageCredentials() {
      * * databricks.StorageCredential to manage Storage Credentials within Unity Catalog.
      * 
      */
-    public static CompletableFuture getStorageCredentialsPlain() {
-        return getStorageCredentialsPlain(GetStorageCredentialsPlainArgs.Empty, InvokeOptions.Empty);
+    public static CompletableFuture getStorageCredentialsPlain(GetStorageCredentialsPlainArgs args) {
+        return getStorageCredentialsPlain(args, InvokeOptions.Empty);
     }
     /**
      * > **Note** This data source can only be used with a workspace-level provider!
@@ -14622,8 +17494,8 @@ public static CompletableFuture getStorageCredentia
      * * databricks.StorageCredential to manage Storage Credentials within Unity Catalog.
      * 
      */
-    public static Output getStorageCredentials(GetStorageCredentialsArgs args) {
-        return getStorageCredentials(args, InvokeOptions.Empty);
+    public static Output getStorageCredentials(GetStorageCredentialsArgs args, InvokeOptions options) {
+        return Deployment.getInstance().invoke("databricks:index/getStorageCredentials:getStorageCredentials", TypeShape.of(GetStorageCredentialsResult.class), args, Utilities.withVersion(options));
     }
     /**
      * > **Note** This data source can only be used with a workspace-level provider!
@@ -14674,8 +17546,8 @@ public static Output getStorageCredentials(GetStora
      * * databricks.StorageCredential to manage Storage Credentials within Unity Catalog.
      * 
      */
-    public static CompletableFuture getStorageCredentialsPlain(GetStorageCredentialsPlainArgs args) {
-        return getStorageCredentialsPlain(args, InvokeOptions.Empty);
+    public static Output getStorageCredentials(GetStorageCredentialsArgs args, InvokeOutputOptions options) {
+        return Deployment.getInstance().invoke("databricks:index/getStorageCredentials:getStorageCredentials", TypeShape.of(GetStorageCredentialsResult.class), args, Utilities.withVersion(options));
     }
     /**
      * > **Note** This data source can only be used with a workspace-level provider!
@@ -14726,17 +17598,19 @@ public static CompletableFuture getStorageCredentia
      * * databricks.StorageCredential to manage Storage Credentials within Unity Catalog.
      * 
      */
-    public static Output getStorageCredentials(GetStorageCredentialsArgs args, InvokeOptions options) {
-        return Deployment.getInstance().invoke("databricks:index/getStorageCredentials:getStorageCredentials", TypeShape.of(GetStorageCredentialsResult.class), args, Utilities.withVersion(options));
+    public static CompletableFuture getStorageCredentialsPlain(GetStorageCredentialsPlainArgs args, InvokeOptions options) {
+        return Deployment.getInstance().invokeAsync("databricks:index/getStorageCredentials:getStorageCredentials", TypeShape.of(GetStorageCredentialsResult.class), args, Utilities.withVersion(options));
     }
     /**
      * > **Note** This data source can only be used with a workspace-level provider!
      * 
-     * Retrieves a list of databricks.StorageCredential objects, that were created by Pulumi or manually, so that special handling could be applied.
+     * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors.
+     * 
+     * Retrieves details of a specific table in Unity Catalog, that were created by Pulumi or manually. Use databricks.getTables to retrieve multiple tables in Unity Catalog
      * 
      * ## Example Usage
      * 
-     * List all storage credentials in the metastore
+     * Read  on a specific table `main.certified.fct_transactions`:
      * 
      * <!--Start PulumiCodeChooser -->
      * 
@@ -14747,7 +17621,10 @@ public static Output getStorageCredentials(GetStora
      * import com.pulumi.Pulumi;
      * import com.pulumi.core.Output;
      * import com.pulumi.databricks.DatabricksFunctions;
-     * import com.pulumi.databricks.inputs.GetStorageCredentialsArgs;
+     * import com.pulumi.databricks.inputs.GetTableArgs;
+     * import com.pulumi.databricks.Grants;
+     * import com.pulumi.databricks.GrantsArgs;
+     * import com.pulumi.databricks.inputs.GrantsGrantArgs;
      * import java.util.List;
      * import java.util.ArrayList;
      * import java.util.Map;
@@ -14761,9 +17638,20 @@ public static Output getStorageCredentials(GetStora
      *     }
      * 
      *     public static void stack(Context ctx) {
-     *         final var all = DatabricksFunctions.getStorageCredentials();
+     *         final var fctTransactions = DatabricksFunctions.getTable(GetTableArgs.builder()
+     *             .name("main.certified.fct_transactions")
+     *             .build());
+     * 
+     *         var things = new Grants("things", GrantsArgs.builder()
+     *             .table(fctTransactions.applyValue(getTableResult -> getTableResult.name()))
+     *             .grants(GrantsGrantArgs.builder()
+     *                 .principal("sensitive")
+     *                 .privileges(                
+     *                     "SELECT",
+     *                     "MODIFY")
+     *                 .build())
+     *             .build());
      * 
-     *         ctx.export("allStorageCredentials", all.applyValue(getStorageCredentialsResult -> getStorageCredentialsResult.names()));
      *     }
      * }
      * }
@@ -14774,12 +17662,12 @@ public static Output getStorageCredentials(GetStora
      * 
      * The following resources are used in the same context:
      * 
-     * * databricks.StorageCredential to get information about a single credential
-     * * databricks.StorageCredential to manage Storage Credentials within Unity Catalog.
+     * * databricks.Grant to manage grants within Unity Catalog.
+     * * databricks.getTables to list all tables within a schema in Unity Catalog.
      * 
      */
-    public static CompletableFuture getStorageCredentialsPlain(GetStorageCredentialsPlainArgs args, InvokeOptions options) {
-        return Deployment.getInstance().invokeAsync("databricks:index/getStorageCredentials:getStorageCredentials", TypeShape.of(GetStorageCredentialsResult.class), args, Utilities.withVersion(options));
+    public static Output getTable(GetTableArgs args) {
+        return getTable(args, InvokeOptions.Empty);
     }
     /**
      * > **Note** This data source can only be used with a workspace-level provider!
@@ -14846,8 +17734,8 @@ public static CompletableFuture getStorageCredentia
      * * databricks.getTables to list all tables within a schema in Unity Catalog.
      * 
      */
-    public static Output getTable(GetTableArgs args) {
-        return getTable(args, InvokeOptions.Empty);
+    public static CompletableFuture getTablePlain(GetTablePlainArgs args) {
+        return getTablePlain(args, InvokeOptions.Empty);
     }
     /**
      * > **Note** This data source can only be used with a workspace-level provider!
@@ -14914,8 +17802,8 @@ public static Output getTable(GetTableArgs args) {
      * * databricks.getTables to list all tables within a schema in Unity Catalog.
      * 
      */
-    public static CompletableFuture getTablePlain(GetTablePlainArgs args) {
-        return getTablePlain(args, InvokeOptions.Empty);
+    public static Output getTable(GetTableArgs args, InvokeOptions options) {
+        return Deployment.getInstance().invoke("databricks:index/getTable:getTable", TypeShape.of(GetTableResult.class), args, Utilities.withVersion(options));
     }
     /**
      * > **Note** This data source can only be used with a workspace-level provider!
@@ -14982,7 +17870,7 @@ public static CompletableFuture getTablePlain(GetTablePlainArgs
      * * databricks.getTables to list all tables within a schema in Unity Catalog.
      * 
      */
-    public static Output getTable(GetTableArgs args, InvokeOptions options) {
+    public static Output getTable(GetTableArgs args, InvokeOutputOptions options) {
         return Deployment.getInstance().invoke("databricks:index/getTable:getTable", TypeShape.of(GetTableResult.class), args, Utilities.withVersion(options));
     }
     /**
@@ -15366,8 +18254,159 @@ public static Output getTables(GetTablesArgs args, InvokeOption
      * * databricks.Catalog to manage catalogs within Unity Catalog.
      * 
      */
-    public static CompletableFuture getTablesPlain(GetTablesPlainArgs args, InvokeOptions options) {
-        return Deployment.getInstance().invokeAsync("databricks:index/getTables:getTables", TypeShape.of(GetTablesResult.class), args, Utilities.withVersion(options));
+    public static Output getTables(GetTablesArgs args, InvokeOutputOptions options) {
+        return Deployment.getInstance().invoke("databricks:index/getTables:getTables", TypeShape.of(GetTablesResult.class), args, Utilities.withVersion(options));
+    }
+    /**
+     * > **Note** This data source can only be used with a workspace-level provider!
+     * 
+     * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors.
+     * 
+     * Retrieves a list of managed or external table full names in Unity Catalog, that were created by Pulumi or manually. Use databricks.getViews for retrieving a list of views.
+     * 
+     * ## Example Usage
+     * 
+     * Granting `SELECT` and `MODIFY` to `sensitive` group on all tables in a _things_ databricks.Schema from _sandbox_ databricks_catalog:
+     * 
+     * <!--Start PulumiCodeChooser -->
+     * 
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetTablesArgs;
+     * import com.pulumi.databricks.Grants;
+     * import com.pulumi.databricks.GrantsArgs;
+     * import com.pulumi.databricks.inputs.GrantsGrantArgs;
+     * import com.pulumi.codegen.internal.KeyedValue;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var things = DatabricksFunctions.getTables(GetTablesArgs.builder()
+     *             .catalogName("sandbox")
+     *             .schemaName("things")
+     *             .build());
+     * 
+     *         final var thingsGrants = things.applyValue(getTablesResult -> {
+     *             final var resources = new ArrayList();
+     *             for (var range : KeyedValue.of(getTablesResult.ids())) {
+     *                 var resource = new Grants("thingsGrants-" + range.key(), GrantsArgs.builder()
+     *                     .table(range.value())
+     *                     .grants(GrantsGrantArgs.builder()
+     *                         .principal("sensitive")
+     *                         .privileges(                        
+     *                             "SELECT",
+     *                             "MODIFY")
+     *                         .build())
+     *                     .build());
+     * 
+     *                 resources.add(resource);
+     *             }
+     * 
+     *             return resources;
+     *         });
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.Schema to manage schemas within Unity Catalog. + * * databricks.Catalog to manage catalogs within Unity Catalog. + * + */ + public static CompletableFuture getTablesPlain(GetTablesPlainArgs args, InvokeOptions options) { + return Deployment.getInstance().invokeAsync("databricks:index/getTables:getTables", TypeShape.of(GetTablesResult.class), args, Utilities.withVersion(options)); + } + /** + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + * + * Retrieves information about databricks_user. + * + * ## Example Usage + * + * Adding user to administrative group + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetGroupArgs;
+     * import com.pulumi.databricks.inputs.GetUserArgs;
+     * import com.pulumi.databricks.GroupMember;
+     * import com.pulumi.databricks.GroupMemberArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App }{{@code
+     *     public static void main(String[] args) }{{@code
+     *         Pulumi.run(App::stack);
+     *     }}{@code
+     * 
+     *     public static void stack(Context ctx) }{{@code
+     *         final var admins = DatabricksFunctions.getGroup(GetGroupArgs.builder()
+     *             .displayName("admins")
+     *             .build());
+     * 
+     *         final var me = DatabricksFunctions.getUser(GetUserArgs.builder()
+     *             .userName("me}{@literal @}{@code example.com")
+     *             .build());
+     * 
+     *         var myMemberA = new GroupMember("myMemberA", GroupMemberArgs.builder()
+     *             .groupId(admins.applyValue(getGroupResult -> getGroupResult.id()))
+     *             .memberId(me.applyValue(getUserResult -> getUserResult.id()))
+     *             .build());
+     * 
+     *     }}{@code
+     * }}{@code
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * - End to end workspace management guide. + * - databricks.getCurrentUser data to retrieve information about databricks.User or databricks_service_principal, that is calling Databricks REST API. + * - databricks.Group to manage [groups in Databricks Workspace](https://docs.databricks.com/administration-guide/users-groups/groups.html) or [Account Console](https://accounts.cloud.databricks.com/) (for AWS deployments). + * - databricks.Group data to retrieve information about databricks.Group members, entitlements and instance profiles. + * - databricks.GroupInstanceProfile to attach databricks.InstanceProfile (AWS) to databricks_group. + * - databricks.GroupMember to attach users and groups as group members. + * - databricks.Permissions to manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspace. + * - databricks.User to [manage users](https://docs.databricks.com/administration-guide/users-groups/users.html), that could be added to databricks.Group within the workspace. + * - databricks.UserInstanceProfile to attach databricks.InstanceProfile (AWS) to databricks_user. + * + */ + public static Output getUser() { + return getUser(GetUserArgs.Empty, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -15438,8 +18477,8 @@ public static CompletableFuture getTablesPlain(GetTablesPlainAr * - databricks.UserInstanceProfile to attach databricks.InstanceProfile (AWS) to databricks_user. 
* */ - public static Output getUser() { - return getUser(GetUserArgs.Empty, InvokeOptions.Empty); + public static CompletableFuture getUserPlain() { + return getUserPlain(GetUserPlainArgs.Empty, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -15510,8 +18549,8 @@ public static Output getUser() { * - databricks.UserInstanceProfile to attach databricks.InstanceProfile (AWS) to databricks_user. * */ - public static CompletableFuture getUserPlain() { - return getUserPlain(GetUserPlainArgs.Empty, InvokeOptions.Empty); + public static Output getUser(GetUserArgs args) { + return getUser(args, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -15582,8 +18621,8 @@ public static CompletableFuture getUserPlain() { * - databricks.UserInstanceProfile to attach databricks.InstanceProfile (AWS) to databricks_user. * */ - public static Output getUser(GetUserArgs args) { - return getUser(args, InvokeOptions.Empty); + public static CompletableFuture getUserPlain(GetUserPlainArgs args) { + return getUserPlain(args, InvokeOptions.Empty); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -15654,8 +18693,8 @@ public static Output getUser(GetUserArgs args) { * - databricks.UserInstanceProfile to attach databricks.InstanceProfile (AWS) to databricks_user. 
* */ - public static CompletableFuture getUserPlain(GetUserPlainArgs args) { - return getUserPlain(args, InvokeOptions.Empty); + public static Output getUser(GetUserArgs args, InvokeOptions options) { + return Deployment.getInstance().invoke("databricks:index/getUser:getUser", TypeShape.of(GetUserResult.class), args, Utilities.withVersion(options)); } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -15726,7 +18765,7 @@ public static CompletableFuture getUserPlain(GetUserPlainArgs arg * - databricks.UserInstanceProfile to attach databricks.InstanceProfile (AWS) to databricks_user. * */ - public static Output getUser(GetUserArgs args, InvokeOptions options) { + public static Output getUser(GetUserArgs args, InvokeOutputOptions options) { return Deployment.getInstance().invoke("databricks:index/getUser:getUser", TypeShape.of(GetUserResult.class), args, Utilities.withVersion(options)); } /** @@ -16032,6 +19071,83 @@ public static CompletableFuture getViewsPlain(GetViewsPlainArgs public static Output getViews(GetViewsArgs args, InvokeOptions options) { return Deployment.getInstance().invoke("databricks:index/getViews:getViews", TypeShape.of(GetViewsResult.class), args, Utilities.withVersion(options)); } + /** + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + * + * Retrieves a list of view full names in Unity Catalog, that were created by Pulumi or manually. Use databricks.getTables for retrieving a list of tables. 
+ * + * ## Example Usage + * + * Granting `SELECT` and `MODIFY` to `sensitive` group on all views in a _things_ databricks.Schema from _sandbox_ databricks_catalog. + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetViewsArgs;
+     * import com.pulumi.databricks.Grants;
+     * import com.pulumi.databricks.GrantsArgs;
+     * import com.pulumi.databricks.inputs.GrantsGrantArgs;
+     * import com.pulumi.codegen.internal.KeyedValue;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var things = DatabricksFunctions.getViews(GetViewsArgs.builder()
+     *             .catalogName("sandbox")
+     *             .schemaName("things")
+     *             .build());
+     * 
+     *         final var thingsGrants = things.applyValue(getViewsResult -> {
+     *             final var resources = new ArrayList();
+     *             for (var range : KeyedValue.of(getViewsResult.ids())) {
+     *                 var resource = new Grants("thingsGrants-" + range.key(), GrantsArgs.builder()
+     *                     .table(range.value())
+     *                     .grants(GrantsGrantArgs.builder()
+     *                         .principal("sensitive")
+     *                         .privileges(                        
+     *                             "SELECT",
+     *                             "MODIFY")
+     *                         .build())
+     *                     .build());
+     * 
+     *                 resources.add(resource);
+     *             }
+     * 
+     *             return resources;
+     *         });
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.Schema to manage schemas within Unity Catalog. + * * databricks.Catalog to manage catalogs within Unity Catalog. + * + */ + public static Output getViews(GetViewsArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getViews:getViews", TypeShape.of(GetViewsResult.class), args, Utilities.withVersion(options)); + } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. * @@ -16283,6 +19399,64 @@ public static CompletableFuture getVolumePlain(GetVolumePlainAr public static Output getVolume(GetVolumeArgs args, InvokeOptions options) { return Deployment.getInstance().invoke("databricks:index/getVolume:getVolume", TypeShape.of(GetVolumeResult.class), args, Utilities.withVersion(options)); } + /** + * Retrieves details about databricks.Volume that was created by Pulumi or manually. + * A volume can be identified by its three-level (fully qualified) name (in the form of: `catalog_name`.`schema_name`.`volume_name`) as input. This can be retrieved programmatically using databricks.getVolumes data source. + * + * ## Example Usage + * + * * Retrieve details of all volumes in in a _things_ databricks.Schema of a _sandbox_ databricks_catalog: + * + * <!--Start PulumiCodeChooser --> + * <!--End PulumiCodeChooser --> + * + * * Search for a specific volume by its fully qualified name + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetVolumeArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var this_ = DatabricksFunctions.getVolume(GetVolumeArgs.builder()
+     *             .name("catalog.schema.volume")
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.Volume to manage volumes within Unity Catalog. + * * databricks.Schema to manage schemas within Unity Catalog. + * * databricks.Catalog to manage catalogs within Unity Catalog. + * + */ + public static Output getVolume(GetVolumeArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getVolume:getVolume", TypeShape.of(GetVolumeResult.class), args, Utilities.withVersion(options)); + } /** * Retrieves details about databricks.Volume that was created by Pulumi or manually. * A volume can be identified by its three-level (fully qualified) name (in the form of: `catalog_name`.`schema_name`.`volume_name`) as input. This can be retrieved programmatically using databricks.getVolumes data source. @@ -16521,6 +19695,66 @@ public static CompletableFuture getVolumesPlain(GetVolumesPlai public static Output getVolumes(GetVolumesArgs args, InvokeOptions options) { return Deployment.getInstance().invoke("databricks:index/getVolumes:getVolumes", TypeShape.of(GetVolumesResult.class), args, Utilities.withVersion(options)); } + /** + * > **Note** This data source can only be used with a workspace-level provider! + * + * Retrieves a list of databricks.Volume ids (full names), that were created by Pulumi or manually. + * + * ## Plugin Framework Migration + * + * The volumes data source has been migrated from sdkv2 to plugin framework in version 1.57。 If you encounter any problem with this data source and suspect it is due to the migration, you can fallback to sdkv2 by setting the environment variable in the following way `export USE_SDK_V2_DATA_SOURCES="databricks.getVolumes"`. + * + * ## Example Usage + * + * Listing all volumes in a _things_ databricks.Schema of a _sandbox_ databricks_catalog: + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetVolumesArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var this_ = DatabricksFunctions.getVolumes(GetVolumesArgs.builder()
+     *             .catalogName("sandbox")
+     *             .schemaName("things")
+     *             .build());
+     * 
+     *         ctx.export("allVolumes", this_);
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.Volume to manage volumes within Unity Catalog. + * * databricks.Schema to manage schemas within Unity Catalog. + * * databricks.Catalog to manage catalogs within Unity Catalog. + * + */ + public static Output getVolumes(GetVolumesArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getVolumes:getVolumes", TypeShape.of(GetVolumesResult.class), args, Utilities.withVersion(options)); + } /** * > **Note** This data source can only be used with a workspace-level provider! * @@ -16791,6 +20025,48 @@ public static CompletableFuture getZonesPlain(GetZonesPlainArgs public static Output getZones(GetZonesArgs args, InvokeOptions options) { return Deployment.getInstance().invoke("databricks:index/getZones:getZones", TypeShape.of(GetZonesResult.class), args, Utilities.withVersion(options)); } + /** + * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. + * + * This data source allows you to fetch all available AWS availability zones on your workspace on AWS. + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.databricks.DatabricksFunctions;
+     * import com.pulumi.databricks.inputs.GetZonesArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var zones = DatabricksFunctions.getZones();
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + */ + public static Output getZones(GetZonesArgs args, InvokeOutputOptions options) { + return Deployment.getInstance().invoke("databricks:index/getZones:getZones", TypeShape.of(GetZonesResult.class), args, Utilities.withVersion(options)); + } /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/Utilities.java b/sdk/java/src/main/java/com/pulumi/databricks/Utilities.java index d6e743cc9..b5cfedf1c 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/Utilities.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/Utilities.java @@ -14,6 +14,7 @@ import javax.annotation.Nullable; import com.pulumi.core.internal.Environment; import com.pulumi.deployment.InvokeOptions; +import com.pulumi.deployment.InvokeOutputOptions; public class Utilities { @@ -57,16 +58,28 @@ public static Optional getEnvDouble(java.lang.String... names) return Optional.empty(); } - public static InvokeOptions withVersion(@Nullable InvokeOptions options) { - if (options != null && options.getVersion().isPresent()) { - return options; - } - return new InvokeOptions( - options == null ? null : options.getParent().orElse(null), - options == null ? null : options.getProvider().orElse(null), - getVersion() - ); + public static InvokeOptions withVersion(@Nullable InvokeOptions options) { + if (options != null && options.getVersion().isPresent()) { + return options; + } + return new InvokeOptions( + options == null ? null : options.getParent().orElse(null), + options == null ? 
null : options.getProvider().orElse(null), + getVersion() + ); + } + + public static InvokeOutputOptions withVersion(@Nullable InvokeOutputOptions options) { + if (options != null && options.getVersion().isPresent()) { + return options; } + return new InvokeOutputOptions( + options == null ? null : options.getParent().orElse(null), + options == null ? null : options.getProvider().orElse(null), + getVersion(), + options == null ? null : options.getDependsOn() + ); + } private static final java.lang.String version; public static java.lang.String getVersion() { diff --git a/sdk/nodejs/getCluster.ts b/sdk/nodejs/getCluster.ts index 6f7ba7f33..29b5406db 100644 --- a/sdk/nodejs/getCluster.ts +++ b/sdk/nodejs/getCluster.ts @@ -20,7 +20,7 @@ import * as utilities from "./utilities"; * import * as databricks from "@pulumi/databricks"; * * const all = databricks.getClusters({}); - * const allGetCluster = all.then(all => .reduce((__obj, [, ]) => ({ ...__obj, [__key]: databricks.getCluster({ + * const allGetCluster = all.then(all => .reduce((__obj, [__key, __value]) => ({ ...__obj, [__key]: databricks.getCluster({ * clusterId: __value, * }) }))); * ``` @@ -102,7 +102,7 @@ export interface GetClusterResult { * import * as databricks from "@pulumi/databricks"; * * const all = databricks.getClusters({}); - * const allGetCluster = all.then(all => .reduce((__obj, [, ]) => ({ ...__obj, [__key]: databricks.getCluster({ + * const allGetCluster = all.then(all => .reduce((__obj, [__key, __value]) => ({ ...__obj, [__key]: databricks.getCluster({ * clusterId: __value, * }) }))); * ``` diff --git a/sdk/nodejs/getSchema.ts b/sdk/nodejs/getSchema.ts index d4429ee91..0f1b45197 100644 --- a/sdk/nodejs/getSchema.ts +++ b/sdk/nodejs/getSchema.ts @@ -21,7 +21,7 @@ import * as utilities from "./utilities"; * const all = databricks.getSchemas({ * catalogName: "sandbox", * }); - * const this = all.then(all => .reduce((__obj, [, ]) => ({ ...__obj, [__key]: databricks.getSchema({ + * const this = 
all.then(all => .reduce((__obj, [__key, __value]) => ({ ...__obj, [__key]: databricks.getSchema({ * name: __value, * }) }))); * ``` @@ -103,7 +103,7 @@ export interface GetSchemaResult { * const all = databricks.getSchemas({ * catalogName: "sandbox", * }); - * const this = all.then(all => .reduce((__obj, [, ]) => ({ ...__obj, [__key]: databricks.getSchema({ + * const this = all.then(all => .reduce((__obj, [__key, __value]) => ({ ...__obj, [__key]: databricks.getSchema({ * name: __value, * }) }))); * ``` diff --git a/sdk/nodejs/getSqlWarehouse.ts b/sdk/nodejs/getSqlWarehouse.ts index ac30ad3fe..2a49d16cd 100644 --- a/sdk/nodejs/getSqlWarehouse.ts +++ b/sdk/nodejs/getSqlWarehouse.ts @@ -20,7 +20,7 @@ import * as utilities from "./utilities"; * import * as databricks from "@pulumi/databricks"; * * const all = databricks.getSqlWarehouses({}); - * const this = all.then(all => .reduce((__obj, [, ]) => ({ ...__obj, [__key]: databricks.getSqlWarehouse({ + * const this = all.then(all => .reduce((__obj, [__key, __value]) => ({ ...__obj, [__key]: databricks.getSqlWarehouse({ * id: __value, * }) }))); * ``` @@ -261,7 +261,7 @@ export interface GetSqlWarehouseResult { * import * as databricks from "@pulumi/databricks"; * * const all = databricks.getSqlWarehouses({}); - * const this = all.then(all => .reduce((__obj, [, ]) => ({ ...__obj, [__key]: databricks.getSqlWarehouse({ + * const this = all.then(all => .reduce((__obj, [__key, __value]) => ({ ...__obj, [__key]: databricks.getSqlWarehouse({ * id: __value, * }) }))); * ``` diff --git a/sdk/nodejs/getVolume.ts b/sdk/nodejs/getVolume.ts index ee182944b..5927dc385 100644 --- a/sdk/nodejs/getVolume.ts +++ b/sdk/nodejs/getVolume.ts @@ -22,7 +22,7 @@ import * as utilities from "./utilities"; * catalogName: "sandbox", * schemaName: "things", * }); - * const this = all.then(all => .reduce((__obj, [, ]) => ({ ...__obj, [__key]: databricks.getVolume({ + * const this = all.then(all => .reduce((__obj, [__key, __value]) => ({ ...__obj, 
[__key]: databricks.getVolume({ * name: __value, * }) }))); * ``` @@ -106,7 +106,7 @@ export interface GetVolumeResult { * catalogName: "sandbox", * schemaName: "things", * }); - * const this = all.then(all => .reduce((__obj, [, ]) => ({ ...__obj, [__key]: databricks.getVolume({ + * const this = all.then(all => .reduce((__obj, [__key, __value]) => ({ ...__obj, [__key]: databricks.getVolume({ * name: __value, * }) }))); * ``` diff --git a/sdk/nodejs/package.json b/sdk/nodejs/package.json index 34c744f5e..db08d04f9 100644 --- a/sdk/nodejs/package.json +++ b/sdk/nodejs/package.json @@ -14,7 +14,7 @@ "build": "tsc" }, "dependencies": { - "@pulumi/pulumi": "^3.136.0" + "@pulumi/pulumi": "^3.142.0" }, "devDependencies": { "@types/mime": "^2.0.0", diff --git a/sdk/python/pyproject.toml b/sdk/python/pyproject.toml index 9aaf7698a..d1ee3a708 100644 --- a/sdk/python/pyproject.toml +++ b/sdk/python/pyproject.toml @@ -1,10 +1,10 @@ [project] name = "pulumi_databricks" description = "A Pulumi package for creating and managing databricks cloud resources." - dependencies = ["parver>=0.2.1", "pulumi>=3.136.0,<4.0.0", "semver>=2.8.1", "typing-extensions>=4.11; python_version < \"3.11\""] + dependencies = ["parver>=0.2.1", "pulumi>=3.142.0,<4.0.0", "semver>=2.8.1", "typing-extensions>=4.11; python_version < \"3.11\""] keywords = ["pulumi", "databricks", "category/infrastructure"] readme = "README.md" - requires-python = ">=3.8" + requires-python = ">=3.9" version = "1.0.0a0+dev" [project.license] text = "Apache-2.0"