diff --git a/.vscode/cspell.json b/.vscode/cspell.json index fa6c6cc8f083..ed398e48c6c1 100644 --- a/.vscode/cspell.json +++ b/.vscode/cspell.json @@ -157,7 +157,8 @@ "Cifs", "dataserviceid", "uefi", - "Uefi" + "Uefi", + "Reimage" ], "allowCompoundWords": true, "overrides": [ diff --git a/sdk/batch/batch-rest/CHANGELOG.md b/sdk/batch/batch-rest/CHANGELOG.md index d2d14d47afed..ee113ce358b8 100644 --- a/sdk/batch/batch-rest/CHANGELOG.md +++ b/sdk/batch/batch-rest/CHANGELOG.md @@ -1,5 +1,11 @@ # Release History +## 1.0.0-beta.2 (2024-11-07) + +### Features Added + +- Update API version to `2024-07-01.20.0` for Azure Batch service. + ## 1.0.0-beta.1 (2024-08-07) ### Features Added diff --git a/sdk/batch/batch-rest/LICENSE b/sdk/batch/batch-rest/LICENSE new file mode 100644 index 000000000000..7d5934740965 --- /dev/null +++ b/sdk/batch/batch-rest/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2024 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/sdk/batch/batch-rest/MigrationGuide.md b/sdk/batch/batch-rest/MigrationGuide.md new file mode 100644 index 000000000000..1ce6cc205e27 --- /dev/null +++ b/sdk/batch/batch-rest/MigrationGuide.md @@ -0,0 +1,265 @@ +# Guide for migrating to `@azure-rest/batch` from `@azure/batch` + +This guide is intended to assist customers in the migration to `@azure-rest/batch` from the legacy `@azure/batch` package. It will focus on side-by-side comparisons for similar operations between the two packages. + +Familiarity with the legacy client library is assumed. For those new to the Azure Batch JavaScript client library, please refer to [README](https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/batch/batch-rest/README.md) and [samples](https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/batch/batch-rest/samples) of `@azure-rest/batch` instead of this guide. + +## Table of contents + +- [Migration benefits](#migration-benefits) +- [Constructing the clients](#constructing-the-clients) + - [Authenticate with shared key credentials](#authenticate-with-shared-key-credentials) + - [Authenticate with Microsoft Entra ID](#authenticate-with-microsoft-entra-id) +- [Operation response differences](#operation-response-differences) +- [Error handling](#error-handling) +- [More Examples](#more-examples) + - [Create pools](#create-pools) + - [Create jobs](#create-jobs) + - [Submit tasks](#submit-tasks) + +## Migration benefits + +- Reduced package sizes: `@azure-rest/batch` comes in a form called Rest Level Client (RLC), which is much more lightweight than a traditional Modular Client like `@azure/batch`. It takes advantage of TypeScript type inference and reduces bundle size if you were to use it in a browser environment. 
For more reference on RLC, please see this [doc](https://github.com/Azure/azure-sdk-for-js/blob/main/documentation/rest-clients.md) and our introduction [blog](https://devblogs.microsoft.com/azure-sdk/azure-rest-libraries-for-javascript/). + +- Embrace the latest Azure JavaScript SDK ecosystem: for example, it works with [`@azure/identity`](https://www.npmjs.com/package/@azure/identity) to simplify authentication during local development and with Azure Entra. It also benefits from the consistent paging API and unified logging with [`@azure/logger`](https://www.npmjs.com/package/@azure/logger) across all Azure JavaScript SDKs. + +- Get the latest features/API versions of the Azure Batch service. As we are planning to deprecate the `@azure/batch` package, it may not contain the latest updates of the Azure Batch service, while with `@azure-rest/batch` you can use the latest API version of the Azure Batch service. + +## Constructing the clients + +### Authenticate with shared key credentials + +Both `@azure/batch` and `@azure-rest/batch` support shared key authentication. + +Previously in `@azure/batch`, you can use the `BatchSharedKeyCredentials` class exported from `@azure/batch` to construct a shared key credential, then pass the credential and account endpoint to the `BatchServiceClient` constructor to create a client instance. + +```typescript +import { BatchSharedKeyCredentials, BatchServiceClient } from '@azure/batch'; + +const credential = new BatchSharedKeyCredentials("", ""); +const client = new BatchServiceClient(credential, ""); +``` + +Now in `@azure-rest/batch`, you need to install the [`@azure/core-auth`](https://www.npmjs.com/package/@azure/core-auth) package and use the `AzureNamedKeyCredential` class exported from `@azure/core-auth` to construct a shared key credential, then pass the credential and account endpoint to the default exported `createClient` method from `@azure-rest/batch` to create a client instance. 
+ +```typescript +import { AzureNamedKeyCredential } from "@azure/core-auth"; +import createClient from "@azure-rest/batch"; + +const credential = new AzureNamedKeyCredential("", ""); +const client = createClient("", credential); +``` + +### Authenticate with Microsoft Entra ID + +Previously in `@azure/batch`, it only supports the legacy [@azure/ms-rest-nodeauth](https://www.npmjs.com/package/@azure/ms-rest-nodeauth) package, and the browser environment is not supported. The following example uses the `loginWithVmMSI` method exported from `@azure/ms-rest-nodeauth` to authenticate with the Azure Batch service using MSI (Managed Service Identity) based login from a virtual machine created in Azure. + +```typescript +import { BatchServiceClient } from "@azure/batch"; +import { loginWithVmMSI } from "@azure/ms-rest-nodeauth"; + +const credential = await loginWithVmMSI({ + resource: "https://batch.core.windows.net/" +}); +const client = new BatchServiceClient(credential, ""); +``` + +Now in `@azure-rest/batch`, you can pass any of the [credentials from the `@azure/identity` package](https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/identity/identity/samples/AzureIdentityExamples.md) to the `createClient` method to make use of your Microsoft Entra ID credentials. In the following sample, it creates an instance of [`DefaultAzureCredential`](https://learn.microsoft.com/javascript/api/@azure/identity/defaultazurecredential) to authenticate with the Azure Batch service. + +```typescript +import { DefaultAzureCredential } from "@azure/identity"; +import createClient from "@azure-rest/batch"; + +const credential = new DefaultAzureCredential(); +const client = createClient("", credential); +``` + +## Operation response differences + +Previously in `@azure/batch`, the client operation returns a `Promise` that resolves to the result of the response body JSON. The following example demonstrates how to get a job with the `BatchServiceClient` instance. 
+ +```typescript +const job = await client.job.get(""); +console.log(`Job id: ${job.id}, state: ${job.state}`); +``` + +Now in `@azure-rest/batch`, the client operation returns a `Promise` that resolves to the response object, which contains the response body and the status code. In order to get the response body JSON, you need to first check if the response is unexpected with the `isUnexpected` helper method, then access the response body. The following example demonstrates how to get a job in `@azure-rest/batch`. + +```typescript +import { isUnexpected } from '@azure-rest/batch'; +const response = await client.path("/jobs/{jobId}", "").get(); +if (isUnexpected(response)) { + throw new Error(`Failed to get job: ${response.body.message}`); +} +console.log(`Response status code: ${response.status}`); + +const job = response.body; +console.log(`Job id: ${job.id}, state: ${job.state}`); +``` + +## Error handling + +Previously in `@azure/batch`, the client operation succeeds only when the service returns an expected HTTP status code, for example `201` for resource creation operations or `200` for general HTTP GET requests. An unexpected HTTP status code will throw a `RestError` from the `@azure/ms-rest-js` package. The following example demonstrates how to handle different errors that might occur in the get pool request. 
+ +```typescript +import { RestError } from "@azure/ms-rest-js"; + +try { + const pool = await client.pool.get(""); + console.log("Get pool success: ", pool) +} catch (error) { + if (error instanceof RestError) { + // Returned HTTP status is not 200 + console.log(`Service return unexpected status code ${error.statusCode}: ${error.body}`) + } else { + // Other errors like connection errors or other exceptions + console.log("Failed to get pool with error: ", error) + } +} +``` + +Now, for `@azure-rest/batch`, the client operation won't throw errors even when the returned HTTP status code is unexpected; instead, it exports a helper method `isUnexpected` to help you check if the response is unexpected. The following example demonstrates how to handle different errors that might occur in the get pool request. + +```typescript +try { + const response = await client.path("/pools/{poolId}", "").get(); + if (isUnexpected(response)) { + // Returned HTTP status is not 200 + console.log(`Service return unexpected status code ${response.status}: ${response.body}`) + } else { + console.log("Get pool success: ", response.body) + } +} catch (error) { + // Other errors like connection errors or other exceptions + console.log("Failed to get pool with error: ", error) +} +``` + +## More examples + +### Create pools + +Previously in `@azure/batch`, you can use the `BatchServiceClient` instance to create a pool with the `pool.add` method. The following example demonstrates how to create a pool with the `BatchServiceClient` instance. 
+ +```typescript +import { BatchServiceModels } from "@azure/batch"; +import { RestError } from "@azure/ms-rest-js"; +const poolParams: BatchServiceModels.PoolAddParameter = { + id: "", + vmSize: "Standard_D1_v2", + virtualMachineConfiguration: { + nodeAgentSKUId: "batch.node.windows amd64", + imageReference: { + publisher: "microsoftwindowsserver", + offer: "windowsserver", + sku: "2022-datacenter", + }, + }, + networkConfiguration: { + enableAcceleratedNetworking: true, + }, + targetDedicatedNodes: 1, +}; +const result = await client.pool.add(poolParams); +console.log("Pool created"); +``` + +Now in `@azure-rest/batch`, you can use the `path` method of the client instance to send a POST request to the `/pools` endpoint with the pool parameters. Note the `CreatePoolParameters` interface has a `body` field to hold the request body and a `contentType` field to specify the content type of the request. + +```typescript +import { CreatePoolParameters, isUnexpected } from "@azure-rest/batch" + +const poolParams: CreatePoolParameters = { + body: { + id: "", + vmSize: "Standard_D1_v2", + virtualMachineConfiguration: { + nodeAgentSKUId: "batch.node.windows amd64", + imageReference: { + publisher: "microsoftwindowsserver", + offer: "windowsserver", + sku: "2022-datacenter", + }, + }, + networkConfiguration: { + enableAcceleratedNetworking: true, + }, + targetDedicatedNodes: 1, + }, + contentType: "application/json; odata=minimalmetadata", +}; + +const result = await client.path("/pools").post(poolParams); +if (isUnexpected(result)) { + throw new Error(`Failed to create pool: ${result.body.message}`); +} +console.log("Pool created"); +``` + +### Create jobs + +Previously in `@azure/batch`, you can use the `BatchServiceClient` instance to create a job with the `job.add` method. The following example demonstrates how to create a job with the `BatchServiceClient` instance. 
+ +```typescript +import { BatchServiceModels } from "@azure/batch" +const jobAddParam: BatchServiceModels.JobAddParameter = { + id: "", + poolInfo: { poolId: "" }, +}; +const result = await client.job.add(jobAddParam); +console.log("Job created"); +``` + +Now in `@azure-rest/batch`, you can use the `path` method of the client instance to send a POST request to the `/jobs` endpoint with the job parameters. + +```typescript +import { CreateJobParameters, isUnexpected } from "@azure-rest/batch" + +const jobAddParam: CreateJobParameters = { + body: { + id: "", + poolInfo: { poolId: "" }, + }, + contentType: "application/json; odata=minimalmetadata", +}; + +const result = await client.path("/jobs").post(jobAddParam); +if (isUnexpected(result)) { + throw new Error(`Failed to create job: ${result.body.message}`); +} +console.log(`Job created`); +``` + +### Submit tasks + +Previously in `@azure/batch`, you can use the `BatchServiceClient` instance to submit a task to a job with the `task.add` method. The following example demonstrates how to submit a task with the `BatchServiceClient` instance. + +```typescript +import { BatchServiceModels } from "@azure/batch" +const taskAddParam: BatchServiceModels.TaskAddParameter = { + id: "", + commandLine: "cmd /c echo hello", +}; +const result = await client.task.add("", taskAddParam); +console.log("Task submitted"); +``` + +Now in `@azure-rest/batch`, you can use the `path` method of the client instance to send a POST request to the `/jobs/{jobId}/tasks` endpoint with the task parameters. 
+ +```typescript +import { CreateTaskParameters, isUnexpected } from "@azure-rest/batch" + +const taskAddParam: CreateTaskParameters = { + body: { + id: "", + commandLine: "cmd /c echo hello", + }, + contentType: "application/json; odata=minimalmetadata", +}; + +const result = await client.path("/jobs/{jobId}/tasks", "").post(taskAddParam); +if (isUnexpected(result)) { + throw new Error(`Failed to submit task: ${result.body.message}`); +} +console.log("Task submitted"); +``` diff --git a/sdk/batch/batch-rest/README.md b/sdk/batch/batch-rest/README.md index e01499f07b90..6f1b65691a05 100644 --- a/sdk/batch/batch-rest/README.md +++ b/sdk/batch/batch-rest/README.md @@ -6,7 +6,8 @@ Azure Batch provides Cloud-scale job scheduling and compute management. Key links: -TODO: Add links back when the package is published +- [Package (NPM)](https://www.npmjs.com/package/@azure-rest/batch) +- [API reference documentation](https://docs.microsoft.com/javascript/api/@azure-rest/batch?view=azure-node-preview) ## Getting started diff --git a/sdk/batch/batch-rest/assets.json b/sdk/batch/batch-rest/assets.json index 9df6ed63846e..49ae52aa555c 100644 --- a/sdk/batch/batch-rest/assets.json +++ b/sdk/batch/batch-rest/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "js", "TagPrefix": "js/batch/batch-rest", - "Tag": "js/batch/batch-rest_dee9657e" + "Tag": "js/batch/batch-rest_f23a4f3e48" } diff --git a/sdk/batch/batch-rest/generated/batchClient.ts b/sdk/batch/batch-rest/generated/batchClient.ts index 1bf6345c9680..a9c83a85ea36 100644 --- a/sdk/batch/batch-rest/generated/batchClient.ts +++ b/sdk/batch/batch-rest/generated/batchClient.ts @@ -21,7 +21,7 @@ export interface BatchClientOptions extends ClientOptions { export default function createClient( endpointParam: string, credentials: TokenCredential, - { apiVersion = "2024-02-01.19.0", ...options }: BatchClientOptions = {}, + { apiVersion = "2024-07-01.20.0", ...options }: BatchClientOptions = 
{}, ): BatchClient { const endpointUrl = options.endpoint ?? options.baseUrl ?? `${endpointParam}`; const userAgentInfo = `azsdk-js-batch-rest/1.0.0-beta.1`; diff --git a/sdk/batch/batch-rest/generated/clientDefinitions.ts b/sdk/batch/batch-rest/generated/clientDefinitions.ts index e0a80065d2e3..4a122daf2aba 100644 --- a/sdk/batch/batch-rest/generated/clientDefinitions.ts +++ b/sdk/batch/batch-rest/generated/clientDefinitions.ts @@ -60,6 +60,9 @@ import { ReplaceNodeUserParameters, GetNodeParameters, RebootNodeParameters, + StartNodeParameters, + DeallocateNodeParameters, + ReimageNodeParameters, DisableNodeSchedulingParameters, EnableNodeSchedulingParameters, GetNodeRemoteLoginSettingsParameters, @@ -191,6 +194,12 @@ import { GetNodeDefaultResponse, RebootNode202Response, RebootNodeDefaultResponse, + StartNode202Response, + StartNodeDefaultResponse, + DeallocateNode202Response, + DeallocateNodeDefaultResponse, + ReimageNode202Response, + ReimageNodeDefaultResponse, DisableNodeScheduling200Response, DisableNodeSchedulingDefaultResponse, EnableNodeScheduling200Response, @@ -271,7 +280,7 @@ export interface CreatePool { post( options: CreatePoolParameters, ): StreamableMethod; - /** Lists all of the Pools in the specified Account. */ + /** Lists all of the Pools which be mounted. */ get( options?: ListPoolsParameters, ): StreamableMethod; @@ -845,6 +854,33 @@ export interface RebootNode { ): StreamableMethod; } +export interface StartNode { + /** You can start a Compute Node only if it has been deallocated. */ + post( + options?: StartNodeParameters, + ): StreamableMethod; +} + +export interface DeallocateNode { + /** You can deallocate a Compute Node only if it is in an idle or running state. 
*/ + post( + options: DeallocateNodeParameters, + ): StreamableMethod< + DeallocateNode202Response | DeallocateNodeDefaultResponse + >; +} + +export interface ReimageNode { + /** + * You can reinstall the operating system on a Compute Node only if it is in an + * idle or running state. This API can be invoked only on Pools created with the + * cloud service configuration property. + */ + post( + options: ReimageNodeParameters, + ): StreamableMethod; +} + export interface DisableNodeScheduling { /** * You can disable Task scheduling on a Compute Node only if its current @@ -871,9 +907,8 @@ export interface EnableNodeScheduling { export interface GetNodeRemoteLoginSettings { /** - * Before you can remotely login to a Compute Node using the remote login - * settings, you must create a user Account on the Compute Node. This API can be - * invoked only on Pools created with the virtual machine configuration property. + * Before you can remotely login to a Compute Node using the remote login settings, + * you must create a user Account on the Compute Node. 
*/ get( options?: GetNodeRemoteLoginSettingsParameters, @@ -1105,6 +1140,24 @@ export interface Routes { poolId: string, nodeId: string, ): RebootNode; + /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/start' has methods for the following verbs: post */ + ( + path: "/pools/{poolId}/nodes/{nodeId}/start", + poolId: string, + nodeId: string, + ): StartNode; + /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/deallocate' has methods for the following verbs: post */ + ( + path: "/pools/{poolId}/nodes/{nodeId}/deallocate", + poolId: string, + nodeId: string, + ): DeallocateNode; + /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/reimage' has methods for the following verbs: post */ + ( + path: "/pools/{poolId}/nodes/{nodeId}/reimage", + poolId: string, + nodeId: string, + ): ReimageNode; /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/disablescheduling' has methods for the following verbs: post */ ( path: "/pools/{poolId}/nodes/{nodeId}/disablescheduling", diff --git a/sdk/batch/batch-rest/generated/isUnexpected.ts b/sdk/batch/batch-rest/generated/isUnexpected.ts index a4a757488320..d58bf659df98 100644 --- a/sdk/batch/batch-rest/generated/isUnexpected.ts +++ b/sdk/batch/batch-rest/generated/isUnexpected.ts @@ -120,6 +120,12 @@ import { GetNodeDefaultResponse, RebootNode202Response, RebootNodeDefaultResponse, + StartNode202Response, + StartNodeDefaultResponse, + DeallocateNode202Response, + DeallocateNodeDefaultResponse, + ReimageNode202Response, + ReimageNodeDefaultResponse, DisableNodeScheduling200Response, DisableNodeSchedulingDefaultResponse, EnableNodeScheduling200Response, @@ -203,6 +209,9 @@ const responseMap: Record = { "PUT /pools/{poolId}/nodes/{nodeId}/users/{userName}": ["200"], "GET /pools/{poolId}/nodes/{nodeId}": ["200"], "POST /pools/{poolId}/nodes/{nodeId}/reboot": ["202"], + "POST /pools/{poolId}/nodes/{nodeId}/start": ["202"], + "POST /pools/{poolId}/nodes/{nodeId}/deallocate": ["202"], + "POST /pools/{poolId}/nodes/{nodeId}/reimage": 
["202"], "POST /pools/{poolId}/nodes/{nodeId}/disablescheduling": ["200"], "POST /pools/{poolId}/nodes/{nodeId}/enablescheduling": ["200"], "GET /pools/{poolId}/nodes/{nodeId}/remoteloginsettings": ["200"], @@ -414,6 +423,15 @@ export function isUnexpected( export function isUnexpected( response: RebootNode202Response | RebootNodeDefaultResponse, ): response is RebootNodeDefaultResponse; +export function isUnexpected( + response: StartNode202Response | StartNodeDefaultResponse, +): response is StartNodeDefaultResponse; +export function isUnexpected( + response: DeallocateNode202Response | DeallocateNodeDefaultResponse, +): response is DeallocateNodeDefaultResponse; +export function isUnexpected( + response: ReimageNode202Response | ReimageNodeDefaultResponse, +): response is ReimageNodeDefaultResponse; export function isUnexpected( response: | DisableNodeScheduling200Response @@ -575,6 +593,12 @@ export function isUnexpected( | GetNodeDefaultResponse | RebootNode202Response | RebootNodeDefaultResponse + | StartNode202Response + | StartNodeDefaultResponse + | DeallocateNode202Response + | DeallocateNodeDefaultResponse + | ReimageNode202Response + | ReimageNodeDefaultResponse | DisableNodeScheduling200Response | DisableNodeSchedulingDefaultResponse | EnableNodeScheduling200Response @@ -656,6 +680,9 @@ export function isUnexpected( | ReplaceNodeUserDefaultResponse | GetNodeDefaultResponse | RebootNodeDefaultResponse + | StartNodeDefaultResponse + | DeallocateNodeDefaultResponse + | ReimageNodeDefaultResponse | DisableNodeSchedulingDefaultResponse | EnableNodeSchedulingDefaultResponse | GetNodeRemoteLoginSettingsDefaultResponse diff --git a/sdk/batch/batch-rest/generated/models.ts b/sdk/batch/batch-rest/generated/models.ts index 1f444d6698a7..8f910053f15c 100644 --- a/sdk/batch/batch-rest/generated/models.ts +++ b/sdk/batch/batch-rest/generated/models.ts @@ -43,7 +43,11 @@ export interface BatchPoolCreateContent { metadata?: Array; /** Mount storage using specified 
file system for the entire lifetime of the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system. */ mountConfiguration?: Array; - /** The desired node communication mode for the pool. If omitted, the default value is Default. */ + /** + * The desired node communication mode for the pool. If omitted, the default value is Default. + * + * Possible values: "default", "classic", "simplified" + */ targetNodeCommunicationMode?: BatchNodeCommunicationMode; /** The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. */ upgradePolicy?: UpgradePolicy; @@ -60,7 +64,7 @@ export interface VirtualMachineConfiguration { nodeAgentSKUId: string; /** Windows operating system settings on the virtual machine. This property must not be specified if the imageReference property specifies a Linux OS Image. */ windowsConfiguration?: WindowsConfiguration; - /** The configuration for data disks attached to the Compute Nodes in the Pool. This property must be specified if the Compute Nodes in the Pool need to have empty data disks attached to them. This cannot be updated. Each Compute Node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. When the Compute Node is removed from the Pool, the disk and all data associated with it is also deleted. The disk is not formatted after being attached, it must be formatted before use - for more information see https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. */ + /** The configuration for data disks attached to the Compute Nodes in the Pool. This property must be specified if the Compute Nodes in the Pool need to have empty data disks attached to them. This cannot be updated. 
Each Compute Node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. When the Compute Node is removed from the Pool, the disk and all data associated with it is also deleted. The disk is not formatted after being attached, it must be formatted before use - for more information see https://docs.microsoft.com/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and https://docs.microsoft.com/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. */ dataDisks?: Array; /** * This only applies to Images that contain the Windows operating system, and @@ -106,6 +110,10 @@ export interface ImageReference { version?: string; /** The ARM resource identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool will be created using this Image Id. This is of the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} or /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} for always defaulting to the latest image version. This property is mutually exclusive with other ImageReference properties. The Azure Compute Gallery Image must have replicas in the same region and must be in the same subscription as the Azure Batch account. If the image version is not specified in the imageId, the latest version will be used. For information about the firewall settings for the Batch Compute Node agent to communicate with the Batch service see https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ virtualMachineImageId?: string; + /** The shared gallery image unique identifier. This property is mutually exclusive with other properties and can be fetched from shared gallery image GET call. 
*/ + sharedGalleryImageId?: string; + /** The community gallery image unique identifier. This property is mutually exclusive with other properties and can be fetched from community gallery image GET call. */ + communityGalleryImageId?: string; } /** Windows operating system settings to apply to the virtual machine. */ @@ -122,17 +130,29 @@ export interface WindowsConfiguration { export interface DataDisk { /** The logical unit number. The logicalUnitNumber is used to uniquely identify each data disk. If attaching multiple disks, each should have a distinct logicalUnitNumber. The value must be between 0 and 63, inclusive. */ lun: number; - /** The type of caching to be enabled for the data disks. The default value for caching is readwrite. For information about the caching options see: https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. */ + /** + * The type of caching to be enabled for the data disks. The default value for caching is readwrite. For information about the caching options see: https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. + * + * Possible values: "none", "readonly", "readwrite" + */ caching?: CachingType; /** The initial disk size in gigabytes. */ diskSizeGB: number; - /** The storage Account type to be used for the data disk. If omitted, the default is "standard_lrs". */ + /** + * The storage Account type to be used for the data disk. If omitted, the default is "standard_lrs". + * + * Possible values: "standard_lrs", "premium_lrs", "standardssd_lrs" + */ storageAccountType?: StorageAccountType; } /** The configuration for container-enabled Pools. */ export interface ContainerConfiguration { - /** The container technology to be used. */ + /** + * The container technology to be used. + * + * Possible values: "dockerCompatible", "criCompatible" + */ type: ContainerType; /** The collection of container Image names. 
This is the full Image reference, as would be specified to "docker pull". An Image will be sourced from the default Docker registry unless the Image is fully qualified with an alternative registry. */ containerImageNames?: string[]; @@ -167,7 +187,7 @@ export interface BatchNodeIdentityReference { * Azure Compute Gallery Image. */ export interface DiskEncryptionConfiguration { - /** The list of disk targets Batch Service will encrypt on the compute node. If omitted, no disks on the compute nodes in the pool will be encrypted. On Linux pool, only "TemporaryDisk" is supported; on Windows pool, "OsDisk" and "TemporaryDisk" must be specified. */ + /** The list of disk targets Batch Service will encrypt on the compute node. The list of disk targets Batch Service will encrypt on the compute node. */ targets?: DiskEncryptionTarget[]; } @@ -177,7 +197,11 @@ export interface DiskEncryptionConfiguration { * with best effort balancing. */ export interface BatchNodePlacementConfiguration { - /** Node placement Policy type on Batch Pools. Allocation policy used by Batch Service to provision the nodes. If not specified, Batch will use the regional policy. */ + /** + * Node placement Policy type on Batch Pools. Allocation policy used by Batch Service to provision the nodes. If not specified, Batch will use the regional policy. + * + * Possible values: "Shared", "Startup", "VfsMounts", "Task", "JobPrep", "Applications" + */ policy?: BatchNodePlacementPolicyType; } @@ -207,7 +231,11 @@ export interface VMExtension { export interface OSDisk { /** Specifies the ephemeral Disk Settings for the operating system disk used by the compute node (VM). */ ephemeralOSDiskSettings?: DiffDiskSettings; - /** Specifies the caching requirements. Possible values are: None, ReadOnly, ReadWrite. The default values are: None for Standard storage. ReadOnly for Premium storage. */ + /** + * Specifies the caching requirements. Possible values are: None, ReadOnly, ReadWrite. 
The default values are: None for Standard storage. ReadOnly for Premium storage. + * + * Possible values: "none", "readonly", "readwrite" + */ caching?: CachingType; /** The initial disk size in GB when creating new OS disk. */ diskSizeGB?: number; @@ -222,21 +250,45 @@ export interface OSDisk { * compute node (VM). */ export interface DiffDiskSettings { - /** Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This property can be used by user in the request to choose the location e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. */ + /** + * Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This property can be used by user in the request to choose the location e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://docs.microsoft.com/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://docs.microsoft.com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. + * + * Possible values: "cachedisk" + */ placement?: DiffDiskPlacement; } /** The managed disk parameters. */ export interface ManagedDisk { - /** The storage account type for managed disk. */ - storageAccountType: StorageAccountType; + /** + * The storage account type for managed disk. + * + * Possible values: "standard_lrs", "premium_lrs", "standardssd_lrs" + */ + storageAccountType?: StorageAccountType; + /** Specifies the security profile settings for the managed disk. 
*/ + securityProfile?: VMDiskSecurityProfile; +} + +/** Specifies the security profile settings for the managed disk. **Note**: It can only be set for Confidential VMs and required when using Confidential VMs. */ +export interface VMDiskSecurityProfile { + /** + * Specifies the EncryptionType of the managed disk. It is set to VMGuestStateOnly for encryption of just the VMGuestState blob, and NonPersistedTPM for not persisting firmware state in the VMGuestState blob. **Note**: It can be set for only Confidential VMs and is required when using Confidential VMs. + * + * Possible values: "NonPersistedTPM", "VMGuestStateOnly" + */ + securityEncryptionType?: SecurityEncryptionTypes; } /** Specifies the security profile settings for the virtual machine or virtual machine scale set. */ export interface SecurityProfile { - /** This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself. */ + /** This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself. For more information on encryption at host requirements, please refer to https://learn.microsoft.com/azure/virtual-machines/disk-encryption#supported-vm-sizes. */ encryptionAtHost: boolean; - /** Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. */ + /** + * Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. + * + * Possible values: "trustedLaunch", "confidentialVM" + */ securityType: SecurityTypes; /** Specifies the security settings like secure boot and vTPM used while creating the virtual machine. 
Specifies the security settings like secure boot and vTPM used while creating the virtual machine. */ uefiSettings: UefiSettings; @@ -261,13 +313,17 @@ export interface ServiceArtifactReference { /** The network configuration for a Pool. */ export interface NetworkConfiguration { - /** The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. For Pools created with virtualMachineConfiguration only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication. For Pools created with a virtual machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Also enable outbound connections to Azure Storage on port 443. 
For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ + /** The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. Only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication. Enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Also enable outbound connections to Azure Storage on port 443. For more details see: https://docs.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ subnetId?: string; - /** The scope of dynamic vnet assignment. */ + /** + * The scope of dynamic vnet assignment. + * + * Possible values: "none", "job" + */ dynamicVNetAssignmentScope?: DynamicVNetAssignmentScope; - /** The configuration for endpoints on Compute Nodes in the Batch Pool. 
Pool endpoint configuration is only supported on Pools with the virtualMachineConfiguration property. */ + /** The configuration for endpoints on Compute Nodes in the Batch Pool. */ endpointConfiguration?: BatchPoolEndpointConfiguration; - /** The Public IPAddress configuration for Compute Nodes in the Batch Pool. Public IP configuration property is only supported on Pools with the virtualMachineConfiguration property. */ + /** The Public IPAddress configuration for Compute Nodes in the Batch Pool. */ publicIPAddressConfiguration?: PublicIpAddressConfiguration; /** Whether this pool should enable accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, which may lead to improved networking performance. For more details, see: https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. */ enableAcceleratedNetworking?: boolean; @@ -286,7 +342,11 @@ export interface BatchPoolEndpointConfiguration { export interface InboundNatPool { /** The name of the endpoint. The name must be unique within a Batch Pool, can contain letters, numbers, underscores, periods, and hyphens. Names must start with a letter or number, must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values are provided the request fails with HTTP status code 400. */ name: string; - /** The protocol of the endpoint. */ + /** + * The protocol of the endpoint. + * + * Possible values: "tcp", "udp" + */ protocol: InboundEndpointProtocol; /** The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400. */ backendPort: number; @@ -302,7 +362,11 @@ export interface InboundNatPool { export interface NetworkSecurityGroupRule { /** The priority for this rule. 
Priorities within a Pool must be unique and are evaluated in order of priority. The lower the number the higher the priority. For example, rules could be specified with order numbers of 150, 250, and 350. The rule with the order number of 150 takes precedence over the rule that has an order of 250. Allowed priorities are 150 to 4096. If any reserved or duplicate values are provided the request fails with HTTP status code 400. */ priority: number; - /** The action that should be taken for a specified IP address, subnet range or tag. */ + /** + * The action that should be taken for a specified IP address, subnet range or tag. + * + * Possible values: "allow", "deny" + */ access: NetworkSecurityGroupRuleAccess; /** The source address prefix or tag to match for the rule. Valid values are a single IP address (i.e. 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, or * (for all addresses). If any other values are provided the request fails with HTTP status code 400. */ sourceAddressPrefix: string; @@ -312,7 +376,11 @@ export interface NetworkSecurityGroupRule { /** The public IP Address configuration of the networking configuration of a Pool. */ export interface PublicIpAddressConfiguration { - /** The provisioning type for Public IP Addresses for the Pool. The default value is BatchManaged. */ + /** + * The provisioning type for Public IP Addresses for the Pool. The default value is BatchManaged. + * + * Possible values: "batchmanaged", "usermanaged", "nopublicipaddresses" + */ provision?: IpAddressProvisioningType; /** The list of public IPs which the Batch service will use when provisioning Compute Nodes. The number of IPs specified here limits the maximum size of the Pool - 100 dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public IP. For example, a pool needing 250 dedicated VMs would need at least 3 public IPs specified. 
Each element of this collection is of the form: /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. */ ipAddressIds?: string[]; @@ -334,7 +402,7 @@ export interface PublicIpAddressConfiguration { * block Batch from being able to re-run the StartTask. */ export interface BatchStartTask { - /** The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + /** The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */ commandLine: string; /** The settings for the container under which the StartTask runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. 
Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ containerSettings?: BatchTaskContainerSettings; @@ -358,8 +426,26 @@ imageName: string; /** The private registry which contains the container Image. This setting can be omitted if was already provided at Pool creation. */ registry?: ContainerRegistryReference; - /** The location of the container Task working directory. The default is 'taskWorkingDirectory'. */ + /** + * The location of the container Task working directory. The default is 'taskWorkingDirectory'. + * + * Possible values: "taskWorkingDirectory", "containerImageDefault" + */ workingDirectory?: ContainerWorkingDirectory; + /** The paths you want to mounted to container task. If this array is null or be not present, container task will mount entire temporary disk drive in windows (or AZ_BATCH_NODE_ROOT_DIR in Linux). It won't' mount any data paths into container if this array is set as empty. */ + containerHostBatchBindMounts?: Array<ContainerHostBatchBindMountEntry>; +} + +/** The entry of path and mount mode you want to mount into task container. */ +export interface ContainerHostBatchBindMountEntry { + /** + * The path which be mounted to container customer can select. + * + * Possible values: "Shared", "Startup", "VfsMounts", "Task", "JobPrep", "Applications" + */ + source?: ContainerHostDataPath; + /** Mount this source path as read-only mode or not. Default value is false (read/write mode). For Linux, if you mount this path as a read/write mode, this does not mean that all users in container have the read/write access for the path, it depends on the access in host VM. If this path is mounted read-only, all users within the container will not be able to modify the path. */ + isReadOnly?: boolean; } /** A single file or multiple files to be downloaded to a Compute Node. 
*/ @@ -398,9 +484,17 @@ export interface UserIdentity { /** Specifies the options for the auto user that runs an Azure Batch Task. */ export interface AutoUserSpecification { - /** The scope for the auto user. The default value is pool. If the pool is running Windows, a value of Task should be specified if stricter isolation between tasks is required, such as if the task mutates the registry in a way which could impact other tasks. */ + /** + * The scope for the auto user. The default value is pool. If the pool is running Windows, a value of Task should be specified if stricter isolation between tasks is required, such as if the task mutates the registry in a way which could impact other tasks. + * + * Possible values: "task", "pool" + */ scope?: AutoUserScope; - /** The elevation level of the auto user. The default value is nonAdmin. */ + /** + * The elevation level of the auto user. The default value is nonAdmin. + * + * Possible values: "nonadmin", "admin" + */ elevationLevel?: ElevationLevel; } @@ -414,7 +508,11 @@ export interface BatchApplicationPackageReference { /** Specifies how Tasks should be distributed across Compute Nodes. */ export interface BatchTaskSchedulingPolicy { - /** How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. */ + /** + * How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. + * + * Possible values: "spread", "pack" + */ nodeFillType: BatchNodeFillType; } @@ -427,7 +525,11 @@ export interface UserAccount { name: string; /** The password for the user Account. */ password: string; - /** The elevation level of the user Account. The default value is nonAdmin. */ + /** + * The elevation level of the user Account. The default value is nonAdmin. + * + * Possible values: "nonadmin", "admin" + */ elevationLevel?: ElevationLevel; /** The Linux-specific user configuration for the user Account. This property is ignored if specified on a Windows Pool. 
If not specified, the user is created with the default options. */ linuxUserConfiguration?: LinuxUserConfiguration; @@ -447,7 +549,11 @@ export interface LinuxUserConfiguration { /** Properties used to create a user Account on a Windows Compute Node. */ export interface WindowsUserConfiguration { - /** The login mode for the user. The default value for VirtualMachineConfiguration Pools is 'batch'. */ + /** + * The login mode for the user. The default is 'batch'. + * + * Possible values: "batch", "interactive" + */ loginMode?: LoginMode; } @@ -532,11 +638,15 @@ export interface AzureFileShareConfiguration { /** Describes an upgrade policy - automatic, manual, or rolling. */ export interface UpgradePolicy { - /** Specifies the mode of an upgrade to virtual machines in the scale set.

Possible values are:

**Manual** - You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade action.

**Automatic** - All virtual machines in the scale set are automatically updated at the same time.

**Rolling** - Scale set performs updates in batches with an optional pause time in between. */ + /** + * Specifies the mode of an upgrade to virtual machines in the scale set.

Possible values are:

**Manual** - You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade action.

**Automatic** - All virtual machines in the scale set are automatically updated at the same time.

**Rolling** - Scale set performs updates in batches with an optional pause time in between. + * + * Possible values: "automatic", "manual", "rolling" + */ mode: UpgradeMode; /** Configuration parameters used for performing automatic OS Upgrade. The configuration parameters used for performing automatic OS upgrade. */ automaticOSUpgradePolicy?: AutomaticOsUpgradePolicy; - /** The configuration parameters used while performing a rolling upgrade. This property is only supported on Pools with the virtualMachineConfiguration property. */ + /** The configuration parameters used while performing a rolling upgrade. */ rollingUpgradePolicy?: RollingUpgradePolicy; } @@ -544,7 +654,7 @@ export interface UpgradePolicy { export interface AutomaticOsUpgradePolicy { /** Whether OS image rollback feature should be disabled. */ disableAutomaticRollback?: boolean; - /** Indicates whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available.

If this is set to true for Windows based pools, [WindowsConfiguration.enableAutomaticUpdates](https://learn.microsoft.com/en-us/rest/api/batchservice/pool/add?tabs=HTTP#windowsconfiguration) cannot be set to true. */ + /** Indicates whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available.

If this is set to true for Windows based pools, [WindowsConfiguration.enableAutomaticUpdates](https://learn.microsoft.com/rest/api/batchservice/pool/add?tabs=HTTP#windowsconfiguration) cannot be set to true. */ enableAutomaticOSUpgrade?: boolean; /** Indicates whether rolling upgrade policy should be used during Auto OS Upgrade. Auto OS Upgrade will fallback to the default policy if no policy is defined on the VMSS. */ useRollingUpgradePolicy?: boolean; @@ -580,19 +690,45 @@ export interface NameValuePair { /** Parameters for updating an Azure Batch Pool. */ export interface BatchPoolUpdateContent { + /** The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. This field can be updated only when the pool is empty. */ + displayName?: string; + /** The size of virtual machines in the Pool. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).

This field can be updated only when the pool is empty. */ + vmSize?: string; + /** Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false.

This field can be updated only when the pool is empty. */ + enableInterNodeCommunication?: boolean; /** A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is left unchanged. */ startTask?: BatchStartTask; /** A list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. If this element is present, it replaces any existing Package references. If you specify an empty collection, then all Package references are removed from the Pool. If omitted, any existing Package references are left unchanged. */ applicationPackageReferences?: Array<BatchApplicationPackageReference>; /** A list of name-value pairs associated with the Pool as metadata. If this element is present, it replaces any existing metadata configured on the Pool. If you specify an empty collection, any metadata is removed from the Pool. If omitted, any existing metadata is left unchanged. */ metadata?: Array<MetadataItem>; - /** The desired node communication mode for the pool. If this element is present, it replaces the existing targetNodeCommunicationMode configured on the Pool. If omitted, any existing metadata is left unchanged. */ + /** The virtual machine configuration for the Pool. This property must be specified.

This field can be updated only when the pool is empty. */ + virtualMachineConfiguration?: VirtualMachineConfiguration; + /** + * The desired node communication mode for the pool. If this element is present, it replaces the existing targetNodeCommunicationMode configured on the Pool. If omitted, any existing metadata is left unchanged. + * + * Possible values: "default", "classic", "simplified" + */ targetNodeCommunicationMode?: BatchNodeCommunicationMode; + /** The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256.

This field can be updated only when the pool is empty. */ + taskSlotsPerNode?: number; + /** How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread.

This field can be updated only when the pool is empty. */ + taskSchedulingPolicy?: BatchTaskSchedulingPolicy; + /** The network configuration for the Pool. This field can be updated only when the pool is empty. */ + networkConfiguration?: NetworkConfiguration; + /** The user-specified tags associated with the pool. The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'.

This field can be updated only when the pool is empty. */ + resourceTags?: Record<string, string>; /** The list of user Accounts to be created on each Compute Node in the Pool. This field can be updated only when the pool is empty. */ + userAccounts?: Array<UserAccount>; /** Mount storage using specified file system for the entire lifetime of the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system.

This field can be updated only when the pool is empty. */ + mountConfiguration?: Array<MountConfiguration>; /** The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling.

This field can be updated only when the pool is empty. */ + upgradePolicy?: UpgradePolicy; } /** Parameters for enabling automatic scaling on an Azure Batch Pool. */ export interface BatchPoolEnableAutoScaleContent { - /** The formula for the desired number of Compute Nodes in the Pool. The formula is checked for validity before it is applied to the Pool. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). */ + /** The formula for the desired number of Compute Nodes in the Pool. The formula is checked for validity before it is applied to the Pool. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling). */ autoScaleFormula?: string; /** The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If you specify a new interval, then the existing autoscale evaluation schedule will be stopped and a new autoscale evaluation schedule will be started, with its starting time being the time when this request was issued. 
*/ autoScaleEvaluationInterval?: string; @@ -600,7 +736,7 @@ export interface BatchPoolEnableAutoScaleContent { /** Parameters for evaluating an automatic scaling formula on an Azure Batch Pool. */ export interface BatchPoolEvaluateAutoScaleContent { - /** The formula for the desired number of Compute Nodes in the Pool. The formula is validated and its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a Pool'. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). */ + /** The formula for the desired number of Compute Nodes in the Pool. The formula is validated and its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a Pool'. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling). */ autoScaleFormula: string; } @@ -612,7 +748,11 @@ export interface BatchPoolResizeContent { targetLowPriorityNodes?: number; /** The timeout for allocation of Nodes to the Pool or removal of Compute Nodes from the Pool. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ resizeTimeout?: string; - /** Determines what to do with a Compute Node and its running task(s) if the Pool size is decreasing. The default value is requeue. */ + /** + * Determines what to do with a Compute Node and its running task(s) if the Pool size is decreasing. The default value is requeue. 
+ * + * Possible values: "requeue", "terminate", "taskcompletion", "retaineddata" + */ nodeDeallocationOption?: BatchNodeDeallocationOption; } @@ -624,7 +764,11 @@ export interface BatchPoolReplaceContent { applicationPackageReferences: Array; /** A list of name-value pairs associated with the Pool as metadata. This list replaces any existing metadata configured on the Pool. If omitted, or if you specify an empty collection, any existing metadata is removed from the Pool. */ metadata: Array; - /** The desired node communication mode for the pool. This setting replaces any existing targetNodeCommunication setting on the Pool. If omitted, the existing setting is default. */ + /** + * The desired node communication mode for the pool. This setting replaces any existing targetNodeCommunication setting on the Pool. If omitted, the existing setting is default. + * + * Possible values: "default", "classic", "simplified" + */ targetNodeCommunicationMode?: BatchNodeCommunicationMode; } @@ -634,7 +778,11 @@ export interface BatchNodeRemoveContent { nodeList: string[]; /** The timeout for removal of Compute Nodes to the Pool. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ resizeTimeout?: string; - /** Determines what to do with a Compute Node and its running task(s) after it has been selected for deallocation. The default value is requeue. */ + /** + * Determines what to do with a Compute Node and its running task(s) after it has been selected for deallocation. The default value is requeue. + * + * Possible values: "requeue", "terminate", "taskcompletion", "retaineddata" + */ nodeDeallocationOption?: BatchNodeDeallocationOption; } @@ -650,7 +798,11 @@ export interface BatchJob { constraints?: BatchJobConstraints; /** The Pool settings associated with the Job. 
*/ poolInfo: BatchPoolInfo; - /** The action the Batch service should take when all Tasks in the Job are in the completed state. The default is noaction. */ + /** + * The action the Batch service should take when all Tasks in the Job are in the completed state. The default is noaction. + * + * Possible values: "noaction", "terminatejob" + */ onAllTasksComplete?: OnAllBatchTasksComplete; /** A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */ metadata?: Array; @@ -694,7 +846,7 @@ export interface BatchJobManagerTask { id: string; /** The display name of the Job Manager Task. It need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ displayName?: string; - /** The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + /** The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. 
If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */ commandLine: string; /** The settings for the container under which the Job Manager Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ containerSettings?: BatchTaskContainerSettings; @@ -755,7 +907,7 @@ export interface OutputFileBlobContainerDestination { containerUrl: string; /** The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container. */ identityReference?: BatchNodeIdentityReference; - /** A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types. */ + /** A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: https://docs.microsoft.com/rest/api/storageservices/put-blob#request-headers-all-blob-types. 
*/ uploadHeaders?: Array; } @@ -772,7 +924,11 @@ export interface HttpHeader { * to perform the upload. */ export interface OutputFileUploadConfig { - /** The conditions under which the Task output file or set of files should be uploaded. The default is taskcompletion. */ + /** + * The conditions under which the Task output file or set of files should be uploaded. The default is taskcompletion. + * + * Possible values: "tasksuccess", "taskfailure", "taskcompletion" + */ uploadCondition: OutputFileUploadCondition; } @@ -826,7 +982,7 @@ export interface AuthenticationTokenSettings { export interface BatchJobPreparationTask { /** A string that uniquely identifies the Job Preparation Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobpreparation'. No other Task in the Job can have the same ID as the Job Preparation Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). */ id?: string; - /** The command line of the Job Preparation Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + /** The command line of the Job Preparation Task. 
The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */ commandLine: string; /** The settings for the container under which the Job Preparation Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ containerSettings?: BatchTaskContainerSettings; @@ -865,7 +1021,7 @@ export interface BatchJobPreparationTask { export interface BatchJobReleaseTask { /** A string that uniquely identifies the Job Release Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobrelease'. No other Task in the Job can have the same ID as the Job Release Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). */ id?: string; - /** The command line of the Job Release Task. 
The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + /** The command line of the Job Release Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */ commandLine: string; /** The settings for the container under which the Job Release Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ containerSettings?: BatchTaskContainerSettings; @@ -896,7 +1052,11 @@ export interface BatchPoolInfo { export interface BatchAutoPoolSpecification { /** A prefix to be added to the unique identifier when a Pool is automatically created. 
The Batch service assigns each auto Pool a unique identifier on creation. To distinguish between Pools created for different purposes, you can specify this element to add a prefix to the ID that is assigned. The prefix can be up to 20 characters long. */ autoPoolIdPrefix?: string; - /** The minimum lifetime of created auto Pools, and how multiple Jobs on a schedule are assigned to Pools. */ + /** + * The minimum lifetime of created auto Pools, and how multiple Jobs on a schedule are assigned to Pools. + * + * Possible values: "jobschedule", "job" + */ poolLifetimeOption: BatchPoolLifetimeOption; /** Whether to keep an auto Pool alive after its lifetime expires. If false, the Batch service deletes the Pool once its lifetime (as determined by the poolLifetimeOption setting) expires; that is, when the Job or Job Schedule completes. If true, the Batch service does not delete the Pool automatically. It is up to the user to delete auto Pools created with this option. */ keepAlive?: boolean; @@ -910,7 +1070,7 @@ export interface BatchPoolSpecification { displayName?: string; /** The size of the virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). */ vmSize: string; - /** The virtual machine configuration for the Pool. This property must be specified if the Pool needs to be created with Azure IaaS VMs. If it is not specified then the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + /** The virtual machine configuration for the Pool. This property must be specified. */ virtualMachineConfiguration?: VirtualMachineConfiguration; /** The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. 
The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. */ taskSlotsPerNode?: number; @@ -944,7 +1104,11 @@ export interface BatchPoolSpecification { metadata?: Array; /** A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. */ mountConfiguration?: Array; - /** The desired node communication mode for the pool. If omitted, the default value is Default. */ + /** + * The desired node communication mode for the pool. If omitted, the default value is Default. + * + * Possible values: "default", "classic", "simplified" + */ targetNodeCommunicationMode?: BatchNodeCommunicationMode; /** The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. */ upgradePolicy?: UpgradePolicy; @@ -952,8 +1116,10 @@ export interface BatchPoolSpecification { /** The network configuration for the Job. */ export interface BatchJobNetworkConfiguration { - /** The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. This will only work with a VirtualMachineConfiguration Pool. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. 
This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ + /** The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. 
For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://docs.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ subnetId: string; + /** Whether to withdraw Compute Nodes from the virtual network to DNC when the job is terminated or deleted. If true, nodes will remain joined to the virtual network to DNC. If false, nodes will automatically withdraw when the job ends. Defaults to false. */ + skipWithdrawFromVNet: boolean; } /** Contains information about the execution of a Job in the Azure Batch service. */ @@ -972,7 +1138,11 @@ export interface BatchJobExecutionInfo { /** An error encountered by the Batch service when scheduling a Job. */ export interface BatchJobSchedulingError { - /** The category of the Job scheduling error. */ + /** + * The category of the Job scheduling error. + * + * Possible values: "usererror", "servererror" + */ category: ErrorCategory; /** An identifier for the Job scheduling error. Codes are invariant and are intended to be consumed programmatically. */ code?: string; @@ -1026,15 +1196,25 @@ export interface BatchJobUpdateContent { constraints?: BatchJobConstraints; /** The Pool on which the Batch service runs the Job's Tasks. You may change the Pool for a Job only when the Job is disabled. The Patch Job call will fail if you include the poolInfo element and the Job is not disabled. If you specify an autoPoolSpecification in the poolInfo, only the keepAlive property of the autoPoolSpecification can be updated, and then only if the autoPoolSpecification has a poolLifetimeOption of Job (other job properties can be updated as normal). If omitted, the Job continues to run on its current Pool. 
*/ poolInfo?: BatchPoolInfo; - /** The action the Batch service should take when all Tasks in the Job are in the completed state. If omitted, the completion behavior is left unchanged. You may not change the value from terminatejob to noaction - that is, once you have engaged automatic Job termination, you cannot turn it off again. If you try to do this, the request fails with an 'invalid property value' error response; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + /** + * The action the Batch service should take when all Tasks in the Job are in the completed state. If omitted, the completion behavior is left unchanged. You may not change the value from terminatejob to noaction - that is, once you have engaged automatic Job termination, you cannot turn it off again. If you try to do this, the request fails with an 'invalid property value' error response; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + * + * Possible values: "noaction", "terminatejob" + */ onAllTasksComplete?: OnAllBatchTasksComplete; /** A list of name-value pairs associated with the Job as metadata. If omitted, the existing Job metadata is left unchanged. */ metadata?: Array; + /** The network configuration for the Job. */ + networkConfiguration?: BatchJobNetworkConfiguration; } /** Parameters for disabling an Azure Batch Job. */ export interface BatchJobDisableContent { - /** What to do with active Tasks associated with the Job. */ + /** + * What to do with active Tasks associated with the Job. + * + * Possible values: "requeue", "terminate", "wait" + */ disableTasks: DisableBatchJobOption; } @@ -1070,9 +1250,17 @@ export interface BatchJobCreateContent { commonEnvironmentSettings?: Array; /** The Pool on which the Batch service runs the Job's Tasks. */ poolInfo: BatchPoolInfo; - /** The action the Batch service should take when all Tasks in the Job are in the completed state. 
Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. */ + /** + * The action the Batch service should take when all Tasks in the Job are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. + * + * Possible values: "noaction", "terminatejob" + */ onAllTasksComplete?: OnAllBatchTasksComplete; - /** The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. */ + /** + * The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. + * + * Possible values: "noaction", "performexitoptionsjobaction" + */ onTaskFailure?: OnBatchTaskFailure; /** The network configuration for the Job. 
*/ networkConfiguration?: BatchJobNetworkConfiguration; @@ -1092,7 +1280,11 @@ export interface BatchTaskContainerExecutionInfo { /** Information about a Task failure. */ export interface BatchTaskFailureInfo { - /** The category of the Task error. */ + /** + * The category of the Task error. + * + * Possible values: "usererror", "servererror" + */ category: ErrorCategory; /** An identifier for the Task error. Codes are invariant and are intended to be consumed programmatically. */ code?: string; @@ -1142,9 +1334,17 @@ export interface BatchJobSpecification { displayName?: string; /** Whether Tasks in the Job can define dependencies on each other. The default is false. */ usesTaskDependencies?: boolean; - /** The action the Batch service should take when all Tasks in a Job created under this schedule are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. */ + /** + * The action the Batch service should take when all Tasks in a Job created under this schedule are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. + * + * Possible values: "noaction", "terminatejob" + */ onAllTasksComplete?: OnAllBatchTasksComplete; - /** The action the Batch service should take when any Task fails in a Job created under this schedule. 
A Task is considered to have failed if it have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. */ + /** + * The action the Batch service should take when any Task fails in a Job created under this schedule. A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. + * + * Possible values: "noaction", "performexitoptionsjobaction" + */ onTaskFailure?: OnBatchTaskFailure; /** The network configuration for the Job. */ networkConfiguration?: BatchJobNetworkConfiguration; @@ -1304,9 +1504,17 @@ export interface ExitCodeMapping { /** Specifies how the Batch service responds to a particular exit condition. */ export interface ExitOptions { - /** An action to take on the Job containing the Task, if the Task completes with the given exit condition and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The default is none for exit code 0 and terminate for all other exit conditions. If the Job's onTaskFailed property is noaction, then specifying this property returns an error and the add Task request fails with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + /** + * An action to take on the Job containing the Task, if the Task completes with the given exit condition and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The default is none for exit code 0 and terminate for all other exit conditions. 
If the Job's onTaskFailed property is noaction, then specifying this property returns an error and the add Task request fails with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + * + * Possible values: "none", "disable", "terminate" + */ jobAction?: BatchJobAction; - /** An action that the Batch service performs on Tasks that depend on this Task. Possible values are 'satisfy' (allowing dependent tasks to progress) and 'block' (dependent tasks continue to wait). Batch does not yet support cancellation of dependent tasks. */ + /** + * An action that the Batch service performs on Tasks that depend on this Task. Possible values are 'satisfy' (allowing dependent tasks to progress) and 'block' (dependent tasks continue to wait). Batch does not yet support cancellation of dependent tasks. + * + * Possible values: "satisfy", "block" + */ dependencyAction?: DependencyAction; } @@ -1406,7 +1614,11 @@ export interface BatchTaskExecutionInfo { requeueCount: number; /** The most recent time at which the Task has been requeued by the Batch service as the result of a user request. This property is set only if the requeueCount is nonzero. */ lastRequeueTime?: Date | string; - /** The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. */ + /** + * The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. + * + * Possible values: "success", "failure" + */ result?: BatchTaskExecutionResult; } @@ -1466,7 +1678,7 @@ export interface BatchNodeUserCreateContent { isAdmin?: boolean; /** The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. */ expiryTime?: Date | string; - /** The password of the Account. 
The password is required for Windows Compute Nodes (those created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. */ + /** The password of the Account. The password is required for Windows Compute Nodes. For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. */ password?: string; /** The SSH public key that can be used for remote login to the Compute Node. The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux Compute Nodes. If this is specified for a Windows Compute Node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ sshPublicKey?: string; @@ -1474,7 +1686,7 @@ export interface BatchNodeUserCreateContent { /** Parameters for updating a user account for RDP or SSH access on an Azure Batch Compute Node. */ export interface BatchNodeUserUpdateContent { - /** The password of the Account. The password is required for Windows Compute Nodes (those created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. If omitted, any existing password is removed. */ + /** The password of the Account. The password is required for Windows Compute Nodes. For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. If omitted, any existing password is removed. */ password?: string; /** The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. 
*/ expiryTime?: Date | string; @@ -1484,13 +1696,41 @@ export interface BatchNodeUserUpdateContent { /** Parameters for rebooting an Azure Batch Compute Node. */ export interface BatchNodeRebootContent { - /** When to reboot the Compute Node and what to do with currently running Tasks. The default value is requeue. */ + /** + * When to reboot the Compute Node and what to do with currently running Tasks. The default value is requeue. + * + * Possible values: "requeue", "terminate", "taskcompletion", "retaineddata" + */ nodeRebootOption?: BatchNodeRebootOption; } +/** Options for deallocating a Compute Node. */ +export interface BatchNodeDeallocateContent { + /** + * When to deallocate the Compute Node and what to do with currently running Tasks. The default value is requeue. + * + * Possible values: "requeue", "terminate", "taskcompletion", "retaineddata" + */ + nodeDeallocateOption?: BatchNodeDeallocateOption; +} + +/** Parameters for reimaging an Azure Batch Compute Node. */ +export interface BatchNodeReimageContent { + /** + * When to reimage the Compute Node and what to do with currently running Tasks. The default value is requeue. + * + * Possible values: "requeue", "terminate", "taskcompletion", "retaineddata" + */ + nodeReimageOption?: BatchNodeReimageOption; +} + /** Parameters for disabling scheduling on an Azure Batch Compute Node. */ export interface BatchNodeDisableSchedulingContent { - /** What to do with currently running Tasks when disabling Task scheduling on the Compute Node. The default value is requeue. */ + /** + * What to do with currently running Tasks when disabling Task scheduling on the Compute Node. The default value is requeue. 
+ * + * Possible values: "requeue", "terminate", "taskcompletion" + */ nodeDisableSchedulingOption?: BatchNodeDisableSchedulingOption; } @@ -1518,6 +1758,8 @@ export type DiskEncryptionTarget = string; export type BatchNodePlacementPolicyType = string; /** Alias for DiffDiskPlacement */ export type DiffDiskPlacement = string; +/** Alias for SecurityEncryptionTypes */ +export type SecurityEncryptionTypes = string; /** Alias for SecurityTypes */ export type SecurityTypes = string; /** Alias for DynamicVNetAssignmentScope */ @@ -1530,6 +1772,8 @@ export type NetworkSecurityGroupRuleAccess = string; export type IpAddressProvisioningType = string; /** Alias for ContainerWorkingDirectory */ export type ContainerWorkingDirectory = string; +/** Alias for ContainerHostDataPath */ +export type ContainerHostDataPath = string; /** Alias for AutoUserScope */ export type AutoUserScope = string; /** Alias for ElevationLevel */ @@ -1572,5 +1816,9 @@ export type DependencyAction = string; export type BatchTaskState = string; /** Alias for BatchNodeRebootOption */ export type BatchNodeRebootOption = string; +/** Alias for BatchNodeDeallocateOption */ +export type BatchNodeDeallocateOption = string; +/** Alias for BatchNodeReimageOption */ +export type BatchNodeReimageOption = string; /** Alias for BatchNodeDisableSchedulingOption */ export type BatchNodeDisableSchedulingOption = string; diff --git a/sdk/batch/batch-rest/generated/outputModels.ts b/sdk/batch/batch-rest/generated/outputModels.ts index a8968ab6011c..44f19538a936 100644 --- a/sdk/batch/batch-rest/generated/outputModels.ts +++ b/sdk/batch/batch-rest/generated/outputModels.ts @@ -78,7 +78,7 @@ export interface VirtualMachineConfigurationOutput { nodeAgentSKUId: string; /** Windows operating system settings on the virtual machine. This property must not be specified if the imageReference property specifies a Linux OS Image. 
*/ windowsConfiguration?: WindowsConfigurationOutput; - /** The configuration for data disks attached to the Compute Nodes in the Pool. This property must be specified if the Compute Nodes in the Pool need to have empty data disks attached to them. This cannot be updated. Each Compute Node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. When the Compute Node is removed from the Pool, the disk and all data associated with it is also deleted. The disk is not formatted after being attached, it must be formatted before use - for more information see https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. */ + /** The configuration for data disks attached to the Compute Nodes in the Pool. This property must be specified if the Compute Nodes in the Pool need to have empty data disks attached to them. This cannot be updated. Each Compute Node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. When the Compute Node is removed from the Pool, the disk and all data associated with it is also deleted. The disk is not formatted after being attached, it must be formatted before use - for more information see https://docs.microsoft.com/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and https://docs.microsoft.com/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. */ dataDisks?: Array; /** * This only applies to Images that contain the Windows operating system, and @@ -126,6 +126,10 @@ export interface ImageReferenceOutput { virtualMachineImageId?: string; /** The specific version of the platform image or marketplace image used to create the node. 
This read-only field differs from 'version' only if the value specified for 'version' when the pool was created was 'latest'. */ readonly exactVersion?: string; + /** The shared gallery image unique identifier. This property is mutually exclusive with other properties and can be fetched from shared gallery image GET call. */ + sharedGalleryImageId?: string; + /** The community gallery image unique identifier. This property is mutually exclusive with other properties and can be fetched from community gallery image GET call. */ + communityGalleryImageId?: string; } /** Windows operating system settings to apply to the virtual machine. */ @@ -142,17 +146,29 @@ export interface WindowsConfigurationOutput { export interface DataDiskOutput { /** The logical unit number. The logicalUnitNumber is used to uniquely identify each data disk. If attaching multiple disks, each should have a distinct logicalUnitNumber. The value must be between 0 and 63, inclusive. */ lun: number; - /** The type of caching to be enabled for the data disks. The default value for caching is readwrite. For information about the caching options see: https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. */ + /** + * The type of caching to be enabled for the data disks. The default value for caching is readwrite. For information about the caching options see: https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. + * + * Possible values: "none", "readonly", "readwrite" + */ caching?: CachingTypeOutput; /** The initial disk size in gigabytes. */ diskSizeGB: number; - /** The storage Account type to be used for the data disk. If omitted, the default is "standard_lrs". */ + /** + * The storage Account type to be used for the data disk. If omitted, the default is "standard_lrs". 
+ * + * Possible values: "standard_lrs", "premium_lrs", "standardssd_lrs" + */ storageAccountType?: StorageAccountTypeOutput; } /** The configuration for container-enabled Pools. */ export interface ContainerConfigurationOutput { - /** The container technology to be used. */ + /** + * The container technology to be used. + * + * Possible values: "dockerCompatible", "criCompatible" + */ type: ContainerTypeOutput; /** The collection of container Image names. This is the full Image reference, as would be specified to "docker pull". An Image will be sourced from the default Docker registry unless the Image is fully qualified with an alternative registry. */ containerImageNames?: string[]; @@ -187,7 +203,7 @@ export interface BatchNodeIdentityReferenceOutput { * Azure Compute Gallery Image. */ export interface DiskEncryptionConfigurationOutput { - /** The list of disk targets Batch Service will encrypt on the compute node. If omitted, no disks on the compute nodes in the pool will be encrypted. On Linux pool, only "TemporaryDisk" is supported; on Windows pool, "OsDisk" and "TemporaryDisk" must be specified. */ + /** The list of disk targets Batch Service will encrypt on the compute node. If omitted, no disks on the compute nodes in the pool will be encrypted. */ targets?: DiskEncryptionTargetOutput[]; } @@ -197,7 +213,11 @@ export interface DiskEncryptionConfigurationOutput { * with best effort balancing. */ export interface BatchNodePlacementConfigurationOutput { - /** Node placement Policy type on Batch Pools. Allocation policy used by Batch Service to provision the nodes. If not specified, Batch will use the regional policy. */ + /** + * Node placement Policy type on Batch Pools. Allocation policy used by Batch Service to provision the nodes. If not specified, Batch will use the regional policy.
+ * + * Possible values: "regional", "zonal" + */ policy?: BatchNodePlacementPolicyTypeOutput; } @@ -227,7 +247,11 @@ export interface VMExtensionOutput { export interface OSDiskOutput { /** Specifies the ephemeral Disk Settings for the operating system disk used by the compute node (VM). */ ephemeralOSDiskSettings?: DiffDiskSettingsOutput; - /** Specifies the caching requirements. Possible values are: None, ReadOnly, ReadWrite. The default values are: None for Standard storage. ReadOnly for Premium storage. */ + /** + * Specifies the caching requirements. Possible values are: None, ReadOnly, ReadWrite. The default values are: None for Standard storage. ReadOnly for Premium storage. + * + * Possible values: "none", "readonly", "readwrite" + */ caching?: CachingTypeOutput; /** The initial disk size in GB when creating new OS disk. */ diskSizeGB?: number; @@ -242,21 +266,45 @@ export interface OSDiskOutput { * compute node (VM). */ export interface DiffDiskSettingsOutput { - /** Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This property can be used by user in the request to choose the location e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. */ + /** + * Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This property can be used by user in the request to choose the location e.g., cache disk space for Ephemeral OS disk provisioning.
For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://docs.microsoft.com/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://docs.microsoft.com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. + * + * Possible values: "cachedisk" + */ placement?: DiffDiskPlacementOutput; } /** The managed disk parameters. */ export interface ManagedDiskOutput { - /** The storage account type for managed disk. */ - storageAccountType: StorageAccountTypeOutput; + /** + * The storage account type for managed disk. + * + * Possible values: "standard_lrs", "premium_lrs", "standardssd_lrs" + */ + storageAccountType?: StorageAccountTypeOutput; + /** Specifies the security profile settings for the managed disk. */ + securityProfile?: VMDiskSecurityProfileOutput; +} + +/** Specifies the security profile settings for the managed disk. **Note**: It can only be set for Confidential VMs and required when using Confidential VMs. */ +export interface VMDiskSecurityProfileOutput { + /** + * Specifies the EncryptionType of the managed disk. It is set to VMGuestStateOnly for encryption of just the VMGuestState blob, and NonPersistedTPM for not persisting firmware state in the VMGuestState blob. **Note**: It can be set for only Confidential VMs and is required when using Confidential VMs. + * + * Possible values: "NonPersistedTPM", "VMGuestStateOnly" + */ + securityEncryptionType?: SecurityEncryptionTypesOutput; } /** Specifies the security profile settings for the virtual machine or virtual machine scale set. */ export interface SecurityProfileOutput { - /** This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself. 
*/ + /** This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself. For more information on encryption at host requirements, please refer to https://learn.microsoft.com/azure/virtual-machines/disk-encryption#supported-vm-sizes. */ encryptionAtHost: boolean; - /** Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. */ + /** + * Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. + * + * Possible values: "trustedLaunch", "confidentialVM" + */ securityType: SecurityTypesOutput; /** Specifies the security settings like secure boot and vTPM used while creating the virtual machine. Specifies the security settings like secure boot and vTPM used while creating the virtual machine. */ uefiSettings: UefiSettingsOutput; @@ -281,13 +329,17 @@ export interface ServiceArtifactReferenceOutput { /** The network configuration for a Pool. */ export interface NetworkConfigurationOutput { - /** The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. 
The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. For Pools created with virtualMachineConfiguration only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication. For Pools created with a virtual machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Also enable outbound connections to Azure Storage on port 443. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ + /** The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. 
Only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication. Enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Also enable outbound connections to Azure Storage on port 443. For more details see: https://docs.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ subnetId?: string; - /** The scope of dynamic vnet assignment. */ + /** + * The scope of dynamic vnet assignment. + * + * Possible values: "none", "job" + */ dynamicVNetAssignmentScope?: DynamicVNetAssignmentScopeOutput; - /** The configuration for endpoints on Compute Nodes in the Batch Pool. Pool endpoint configuration is only supported on Pools with the virtualMachineConfiguration property. */ + /** The configuration for endpoints on Compute Nodes in the Batch Pool. */ endpointConfiguration?: BatchPoolEndpointConfigurationOutput; - /** The Public IPAddress configuration for Compute Nodes in the Batch Pool. Public IP configuration property is only supported on Pools with the virtualMachineConfiguration property. */ + /** The Public IPAddress configuration for Compute Nodes in the Batch Pool. */ publicIPAddressConfiguration?: PublicIpAddressConfigurationOutput; /** Whether this pool should enable accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, which may lead to improved networking performance. For more details, see: https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. */ enableAcceleratedNetworking?: boolean; @@ -306,7 +358,11 @@ export interface BatchPoolEndpointConfigurationOutput { export interface InboundNatPoolOutput { /** The name of the endpoint. The name must be unique within a Batch Pool, can contain letters, numbers, underscores, periods, and hyphens. 
Names must start with a letter or number, must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values are provided the request fails with HTTP status code 400. */ name: string; - /** The protocol of the endpoint. */ + /** + * The protocol of the endpoint. + * + * Possible values: "tcp", "udp" + */ protocol: InboundEndpointProtocolOutput; /** The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400. */ backendPort: number; @@ -322,7 +378,11 @@ export interface InboundNatPoolOutput { export interface NetworkSecurityGroupRuleOutput { /** The priority for this rule. Priorities within a Pool must be unique and are evaluated in order of priority. The lower the number the higher the priority. For example, rules could be specified with order numbers of 150, 250, and 350. The rule with the order number of 150 takes precedence over the rule that has an order of 250. Allowed priorities are 150 to 4096. If any reserved or duplicate values are provided the request fails with HTTP status code 400. */ priority: number; - /** The action that should be taken for a specified IP address, subnet range or tag. */ + /** + * The action that should be taken for a specified IP address, subnet range or tag. + * + * Possible values: "allow", "deny" + */ access: NetworkSecurityGroupRuleAccessOutput; /** The source address prefix or tag to match for the rule. Valid values are a single IP address (i.e. 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, or * (for all addresses). If any other values are provided the request fails with HTTP status code 400. */ sourceAddressPrefix: string; @@ -332,7 +392,11 @@ export interface NetworkSecurityGroupRuleOutput { /** The public IP Address configuration of the networking configuration of a Pool. 
*/ export interface PublicIpAddressConfigurationOutput { - /** The provisioning type for Public IP Addresses for the Pool. The default value is BatchManaged. */ + /** + * The provisioning type for Public IP Addresses for the Pool. The default value is BatchManaged. + * + * Possible values: "batchmanaged", "usermanaged", "nopublicipaddresses" + */ provision?: IpAddressProvisioningTypeOutput; /** The list of public IPs which the Batch service will use when provisioning Compute Nodes. The number of IPs specified here limits the maximum size of the Pool - 100 dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public IP. For example, a pool needing 250 dedicated VMs would need at least 3 public IPs specified. Each element of this collection is of the form: /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. */ ipAddressIds?: string[]; @@ -354,7 +418,7 @@ export interface PublicIpAddressConfigurationOutput { * block Batch from being able to re-run the StartTask. */ export interface BatchStartTaskOutput { - /** The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + /** The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. 
If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */ commandLine: string; /** The settings for the container under which the StartTask runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ containerSettings?: BatchTaskContainerSettingsOutput; @@ -378,8 +442,26 @@ export interface BatchTaskContainerSettingsOutput { imageName: string; /** The private registry which contains the container Image. This setting can be omitted if was already provided at Pool creation. */ registry?: ContainerRegistryReferenceOutput; - /** The location of the container Task working directory. The default is 'taskWorkingDirectory'. */ + /** + * The location of the container Task working directory. The default is 'taskWorkingDirectory'. + * + * Possible values: "taskWorkingDirectory", "containerImageDefault" + */ workingDirectory?: ContainerWorkingDirectoryOutput; + /** The paths you want mounted into the container task. If this array is null or not present, the container task will mount the entire temporary disk drive in Windows (or AZ_BATCH_NODE_ROOT_DIR in Linux). It won't mount any data paths into the container if this array is set as empty.
 */ + containerHostBatchBindMounts?: Array; +} + +/** The entry of path and mount mode you want to mount into task container. */ +export interface ContainerHostBatchBindMountEntryOutput { + /** + * The path to be mounted into the container, as selected by the customer. + * + * Possible values: "Shared", "Startup", "VfsMounts", "Task", "JobPrep", "Applications" + */ + source?: ContainerHostDataPathOutput; + /** Mount this source path as read-only mode or not. Default value is false (read/write mode). For Linux, if you mount this path as a read/write mode, this does not mean that all users in container have the read/write access for the path, it depends on the access in host VM. If this path is mounted read-only, all users within the container will not be able to modify the path. */ + isReadOnly?: boolean; } /** A single file or multiple files to be downloaded to a Compute Node. */ @@ -418,9 +500,17 @@ export interface UserIdentityOutput { /** Specifies the options for the auto user that runs an Azure Batch Task. */ export interface AutoUserSpecificationOutput { - /** The scope for the auto user. The default value is pool. If the pool is running Windows, a value of Task should be specified if stricter isolation between tasks is required, such as if the task mutates the registry in a way which could impact other tasks. */ + /** + * The scope for the auto user. The default value is pool. If the pool is running Windows, a value of Task should be specified if stricter isolation between tasks is required, such as if the task mutates the registry in a way which could impact other tasks. + * + * Possible values: "task", "pool"
+ * + * Possible values: "nonadmin", "admin" + */ elevationLevel?: ElevationLevelOutput; } @@ -434,7 +524,11 @@ export interface BatchApplicationPackageReferenceOutput { /** Specifies how Tasks should be distributed across Compute Nodes. */ export interface BatchTaskSchedulingPolicyOutput { - /** How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. */ + /** + * How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. + * + * Possible values: "spread", "pack" + */ nodeFillType: BatchNodeFillTypeOutput; } @@ -447,7 +541,11 @@ export interface UserAccountOutput { name: string; /** The password for the user Account. */ password: string; - /** The elevation level of the user Account. The default value is nonAdmin. */ + /** + * The elevation level of the user Account. The default value is nonAdmin. + * + * Possible values: "nonadmin", "admin" + */ elevationLevel?: ElevationLevelOutput; /** The Linux-specific user configuration for the user Account. This property is ignored if specified on a Windows Pool. If not specified, the user is created with the default options. */ linuxUserConfiguration?: LinuxUserConfigurationOutput; @@ -467,7 +565,11 @@ export interface LinuxUserConfigurationOutput { /** Properties used to create a user Account on a Windows Compute Node. */ export interface WindowsUserConfigurationOutput { - /** The login mode for the user. The default value for VirtualMachineConfiguration Pools is 'batch'. */ + /** + * The login mode for the user. The default is 'batch'. + * + * Possible values: "batch", "interactive" + */ loginMode?: LoginModeOutput; } @@ -552,11 +654,15 @@ export interface AzureFileShareConfigurationOutput { /** Describes an upgrade policy - automatic, manual, or rolling. */ export interface UpgradePolicyOutput { - /** Specifies the mode of an upgrade to virtual machines in the scale set.

Possible values are:

**Manual** - You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade action.

**Automatic** - All virtual machines in the scale set are automatically updated at the same time.

**Rolling** - Scale set performs updates in batches with an optional pause time in between. */ + /** + * Specifies the mode of an upgrade to virtual machines in the scale set.

Possible values are:

**Manual** - You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade action.

**Automatic** - All virtual machines in the scale set are automatically updated at the same time.

**Rolling** - Scale set performs updates in batches with an optional pause time in between. + * + * Possible values: "automatic", "manual", "rolling" + */ mode: UpgradeModeOutput; /** Configuration parameters used for performing automatic OS Upgrade. The configuration parameters used for performing automatic OS upgrade. */ automaticOSUpgradePolicy?: AutomaticOsUpgradePolicyOutput; - /** The configuration parameters used while performing a rolling upgrade. This property is only supported on Pools with the virtualMachineConfiguration property. */ + /** The configuration parameters used while performing a rolling upgrade. */ rollingUpgradePolicy?: RollingUpgradePolicyOutput; } @@ -564,7 +670,7 @@ export interface UpgradePolicyOutput { export interface AutomaticOsUpgradePolicyOutput { /** Whether OS image rollback feature should be disabled. */ disableAutomaticRollback?: boolean; - /** Indicates whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available.

If this is set to true for Windows based pools, [WindowsConfiguration.enableAutomaticUpdates](https://learn.microsoft.com/en-us/rest/api/batchservice/pool/add?tabs=HTTP#windowsconfiguration) cannot be set to true. */ + /** Indicates whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available.

If this is set to true for Windows based pools, [WindowsConfiguration.enableAutomaticUpdates](https://learn.microsoft.com/rest/api/batchservice/pool/add?tabs=HTTP#windowsconfiguration) cannot be set to true. */ enableAutomaticOSUpgrade?: boolean; /** Indicates whether rolling upgrade policy should be used during Auto OS Upgrade. Auto OS Upgrade will fallback to the default policy if no policy is defined on the VMSS. */ useRollingUpgradePolicy?: boolean; @@ -612,15 +718,23 @@ export interface BatchPoolOutput { readonly lastModified?: string; /** The creation time of the Pool. */ readonly creationTime?: string; - /** The current state of the Pool. */ + /** + * The current state of the Pool. + * + * Possible values: "active", "deleting" + */ readonly state?: BatchPoolStateOutput; /** The time at which the Pool entered its current state. */ readonly stateTransitionTime?: string; - /** Whether the Pool is resizing. */ + /** + * Whether the Pool is resizing. + * + * Possible values: "steady", "resizing", "stopping" + */ readonly allocationState?: AllocationStateOutput; /** The time at which the Pool entered its current allocation state. */ readonly allocationStateTransitionTime?: string; - /** The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). */ + /** The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available VM sizes, see Sizes for Virtual Machines (Linux) (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for Virtual Machines (Windows) (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). 
Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). */ readonly vmSize?: string; /** The virtual machine configuration for the Pool. This property must be specified. */ readonly virtualMachineConfiguration?: VirtualMachineConfigurationOutput; @@ -662,15 +776,23 @@ export interface BatchPoolOutput { readonly userAccounts?: Array; /** A list of name-value pairs associated with the Pool as metadata. */ readonly metadata?: Array; - /** Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the CloudPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. */ + /** Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the BatchPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. */ readonly stats?: BatchPoolStatisticsOutput; /** A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. */ readonly mountConfiguration?: Array; /** The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. */ readonly identity?: BatchPoolIdentityOutput; - /** The desired node communication mode for the pool. If omitted, the default value is Default. 
*/ + /** + * The desired node communication mode for the pool. If omitted, the default value is Default. + * + * Possible values: "default", "classic", "simplified" + */ targetNodeCommunicationMode?: BatchNodeCommunicationModeOutput; - /** The current state of the pool communication mode. */ + /** + * The current state of the pool communication mode. + * + * Possible values: "default", "classic", "simplified" + */ readonly currentNodeCommunicationMode?: BatchNodeCommunicationModeOutput; /** The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. */ upgradePolicy?: UpgradePolicyOutput; @@ -770,7 +892,11 @@ export interface BatchPoolResourceStatisticsOutput { /** The identity of the Batch pool, if configured. */ export interface BatchPoolIdentityOutput { - /** The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. */ + /** + * The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. + * + * Possible values: "UserAssigned", "None" + */ type: BatchPoolIdentityTypeOutput; /** The list of user identities associated with the Batch account. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. 
*/ userAssignedIdentities?: Array; @@ -803,13 +929,21 @@ export interface BatchSupportedImageOutput { nodeAgentSKUId: string; /** The reference to the Azure Virtual Machine's Marketplace Image. */ imageReference: ImageReferenceOutput; - /** The type of operating system (e.g. Windows or Linux) of the Image. */ + /** + * The type of operating system (e.g. Windows or Linux) of the Image. + * + * Possible values: "linux", "windows" + */ osType: OSTypeOutput; /** The capabilities or features which the Image supports. Not every capability of the Image is listed. Capabilities in this list are considered of special interest and are generally related to integration with other features in the Azure Batch service. */ capabilities?: string[]; /** The time when the Azure Batch service will stop accepting create Pool requests for the Image. */ batchSupportEndOfLife?: string; - /** Whether the Azure Batch service actively verifies that the Image is compatible with the associated Compute Node agent SKU. */ + /** + * Whether the Azure Batch service actively verifies that the Image is compatible with the associated Compute Node agent SKU. + * + * Possible values: "verified", "unverified" + */ verificationType: ImageVerificationTypeOutput; } @@ -859,6 +993,10 @@ export interface BatchNodeCountsOutput { unusable: number; /** The number of Compute Nodes in the waitingForStartTask state. */ waitingForStartTask: number; + /** The number of Compute Nodes in the deallocated state. */ + deallocated: number; + /** The number of Compute Nodes in the deallocating state. */ + deallocating: number; /** The total number of Compute Nodes. */ total: number; /** The number of Compute Nodes in the upgradingOS state. */ @@ -881,11 +1019,19 @@ export interface BatchJobOutput { readonly lastModified?: string; /** The creation time of the Job. */ readonly creationTime?: string; - /** The current state of the Job. */ + /** + * The current state of the Job. 
+ * + * Possible values: "active", "disabling", "disabled", "enabling", "terminating", "completed", "deleting" + */ readonly state?: BatchJobStateOutput; /** The time at which the Job entered its current state. */ readonly stateTransitionTime?: string; - /** The previous state of the Job. This property is not set if the Job is in its initial Active state. */ + /** + * The previous state of the Job. This property is not set if the Job is in its initial Active state. + * + * Possible values: "active", "disabling", "disabled", "enabling", "terminating", "completed", "deleting" + */ readonly previousState?: BatchJobStateOutput; /** The time at which the Job entered its previous state. This property is not set if the Job is in its initial Active state. */ readonly previousStateTransitionTime?: string; @@ -907,9 +1053,17 @@ export interface BatchJobOutput { readonly commonEnvironmentSettings?: Array; /** The Pool settings associated with the Job. */ poolInfo: BatchPoolInfoOutput; - /** The action the Batch service should take when all Tasks in the Job are in the completed state. The default is noaction. */ + /** + * The action the Batch service should take when all Tasks in the Job are in the completed state. The default is noaction. + * + * Possible values: "noaction", "terminatejob" + */ onAllTasksComplete?: OnAllBatchTasksCompleteOutput; - /** The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. */ + /** + * The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. 
A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. + * + * Possible values: "noaction", "performexitoptionsjobaction" + */ readonly onTaskFailure?: OnBatchTaskFailureOutput; /** The network configuration for the Job. */ readonly networkConfiguration?: BatchJobNetworkConfigurationOutput; @@ -917,7 +1071,7 @@ export interface BatchJobOutput { metadata?: Array; /** The execution information for the Job. */ readonly executionInfo?: BatchJobExecutionInfoOutput; - /** Resource usage statistics for the entire lifetime of the Job. This property is populated only if the CloudJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. */ + /** Resource usage statistics for the entire lifetime of the Job. This property is populated only if the BatchJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. */ readonly stats?: BatchJobStatisticsOutput; } @@ -959,7 +1113,7 @@ export interface BatchJobManagerTaskOutput { id: string; /** The display name of the Job Manager Task. It need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ displayName?: string; - /** The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. 
If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + /** The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */ commandLine: string; /** The settings for the container under which the Job Manager Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. 
*/ containerSettings?: BatchTaskContainerSettingsOutput; @@ -1020,7 +1174,7 @@ export interface OutputFileBlobContainerDestinationOutput { containerUrl: string; /** The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container. */ identityReference?: BatchNodeIdentityReferenceOutput; - /** A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types. */ + /** A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: https://docs.microsoft.com/rest/api/storageservices/put-blob#request-headers-all-blob-types. */ uploadHeaders?: Array; } @@ -1037,7 +1191,11 @@ export interface HttpHeaderOutput { * to perform the upload. */ export interface OutputFileUploadConfigOutput { - /** The conditions under which the Task output file or set of files should be uploaded. The default is taskcompletion. */ + /** + * The conditions under which the Task output file or set of files should be uploaded. The default is taskcompletion. + * + * Possible values: "tasksuccess", "taskfailure", "taskcompletion" + */ uploadCondition: OutputFileUploadConditionOutput; } @@ -1091,7 +1249,7 @@ export interface AuthenticationTokenSettingsOutput { export interface BatchJobPreparationTaskOutput { /** A string that uniquely identifies the Job Preparation Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. 
If you do not specify this property, the Batch service assigns a default value of 'jobpreparation'. No other Task in the Job can have the same ID as the Job Preparation Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). */ id?: string; - /** The command line of the Job Preparation Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + /** The command line of the Job Preparation Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */ commandLine: string; /** The settings for the container under which the Job Preparation Task runs. 
When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ containerSettings?: BatchTaskContainerSettingsOutput; @@ -1130,7 +1288,7 @@ export interface BatchJobPreparationTaskOutput { export interface BatchJobReleaseTaskOutput { /** A string that uniquely identifies the Job Release Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobrelease'. No other Task in the Job can have the same ID as the Job Release Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). */ id?: string; - /** The command line of the Job Release Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + /** The command line of the Job Release Task. 
The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */ commandLine: string; /** The settings for the container under which the Job Release Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ containerSettings?: BatchTaskContainerSettingsOutput; @@ -1161,7 +1319,11 @@ export interface BatchPoolInfoOutput { export interface BatchAutoPoolSpecificationOutput { /** A prefix to be added to the unique identifier when a Pool is automatically created. The Batch service assigns each auto Pool a unique identifier on creation. To distinguish between Pools created for different purposes, you can specify this element to add a prefix to the ID that is assigned. The prefix can be up to 20 characters long. */ autoPoolIdPrefix?: string; - /** The minimum lifetime of created auto Pools, and how multiple Jobs on a schedule are assigned to Pools. */ + /** + * The minimum lifetime of created auto Pools, and how multiple Jobs on a schedule are assigned to Pools. 
+ * + * Possible values: "jobschedule", "job" + */ poolLifetimeOption: BatchPoolLifetimeOptionOutput; /** Whether to keep an auto Pool alive after its lifetime expires. If false, the Batch service deletes the Pool once its lifetime (as determined by the poolLifetimeOption setting) expires; that is, when the Job or Job Schedule completes. If true, the Batch service does not delete the Pool automatically. It is up to the user to delete auto Pools created with this option. */ keepAlive?: boolean; @@ -1175,7 +1337,7 @@ export interface BatchPoolSpecificationOutput { displayName?: string; /** The size of the virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). */ vmSize: string; - /** The virtual machine configuration for the Pool. This property must be specified if the Pool needs to be created with Azure IaaS VMs. If it is not specified then the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + /** The virtual machine configuration for the Pool. This property must be specified. */ virtualMachineConfiguration?: VirtualMachineConfigurationOutput; /** The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. */ taskSlotsPerNode?: number; @@ -1209,7 +1371,11 @@ export interface BatchPoolSpecificationOutput { metadata?: Array; /** A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. */ mountConfiguration?: Array; - /** The desired node communication mode for the pool. If omitted, the default value is Default. 
*/ + /** + * The desired node communication mode for the pool. If omitted, the default value is Default. + * + * Possible values: "default", "classic", "simplified" + */ targetNodeCommunicationMode?: BatchNodeCommunicationModeOutput; /** The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. */ upgradePolicy?: UpgradePolicyOutput; @@ -1217,8 +1383,10 @@ export interface BatchPoolSpecificationOutput { /** The network configuration for the Job. */ export interface BatchJobNetworkConfigurationOutput { - /** The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. This will only work with a VirtualMachineConfiguration Pool. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. 
Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ + /** The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://docs.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ subnetId: string; + /** Whether to withdraw Compute Nodes from the virtual network to DNC when the job is terminated or deleted. 
If true, nodes will remain joined to the virtual network to DNC. If false, nodes will automatically withdraw when the job ends. Defaults to false. */ + skipWithdrawFromVNet: boolean; } /** Contains information about the execution of a Job in the Azure Batch service. */ @@ -1237,7 +1405,11 @@ export interface BatchJobExecutionInfoOutput { /** An error encountered by the Batch service when scheduling a Job. */ export interface BatchJobSchedulingErrorOutput { - /** The category of the Job scheduling error. */ + /** + * The category of the Job scheduling error. + * + * Possible values: "usererror", "servererror" + */ category: ErrorCategoryOutput; /** An identifier for the Job scheduling error. Codes are invariant and are intended to be consumed programmatically. */ code?: string; @@ -1321,7 +1493,11 @@ export interface BatchJobPreparationTaskExecutionInfoOutput { startTime: string; /** The time at which the Job Preparation Task completed. This property is set only if the Task is in the Completed state. */ endTime?: string; - /** The current state of the Job Preparation Task on the Compute Node. */ + /** + * The current state of the Job Preparation Task on the Compute Node. + * + * Possible values: "running", "completed" + */ state: BatchJobPreparationTaskStateOutput; /** The root directory of the Job Preparation Task on the Compute Node. You can use this path to retrieve files created by the Task, such as log files. */ taskRootDirectory?: string; @@ -1337,7 +1513,11 @@ export interface BatchJobPreparationTaskExecutionInfoOutput { retryCount: number; /** The most recent time at which a retry of the Job Preparation Task started running. This property is set only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not. 
*/ lastRetryTime?: string; - /** The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. */ + /** + * The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. + * + * Possible values: "success", "failure" + */ result?: BatchTaskExecutionResultOutput; } @@ -1353,7 +1533,11 @@ export interface BatchTaskContainerExecutionInfoOutput { /** Information about a Task failure. */ export interface BatchTaskFailureInfoOutput { - /** The category of the Task error. */ + /** + * The category of the Task error. + * + * Possible values: "usererror", "servererror" + */ category: ErrorCategoryOutput; /** An identifier for the Task error. Codes are invariant and are intended to be consumed programmatically. */ code?: string; @@ -1372,7 +1556,11 @@ export interface BatchJobReleaseTaskExecutionInfoOutput { startTime: string; /** The time at which the Job Release Task completed. This property is set only if the Task is in the Completed state. */ endTime?: string; - /** The current state of the Job Release Task on the Compute Node. */ + /** + * The current state of the Job Release Task on the Compute Node. + * + * Possible values: "running", "completed" + */ state: BatchJobReleaseTaskStateOutput; /** The root directory of the Job Release Task on the Compute Node. You can use this path to retrieve files created by the Task, such as log files. */ taskRootDirectory?: string; @@ -1384,7 +1572,11 @@ export interface BatchJobReleaseTaskExecutionInfoOutput { containerInfo?: BatchTaskContainerExecutionInfoOutput; /** Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. */ failureInfo?: BatchTaskFailureInfoOutput; - /** The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. 
*/ + /** + * The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. + * + * Possible values: "success", "failure" + */ result?: BatchTaskExecutionResultOutput; } @@ -1441,11 +1633,19 @@ export interface BatchJobScheduleOutput { readonly lastModified?: string; /** The creation time of the Job Schedule. */ readonly creationTime?: string; - /** The current state of the Job Schedule. */ + /** + * The current state of the Job Schedule. + * + * Possible values: "active", "completed", "disabled", "terminating", "deleting" + */ readonly state?: BatchJobScheduleStateOutput; /** The time at which the Job Schedule entered the current state. */ readonly stateTransitionTime?: string; - /** The previous state of the Job Schedule. This property is not present if the Job Schedule is in its initial active state. */ + /** + * The previous state of the Job Schedule. This property is not present if the Job Schedule is in its initial active state. + * + * Possible values: "active", "completed", "disabled", "terminating", "deleting" + */ readonly previousState?: BatchJobScheduleStateOutput; /** The time at which the Job Schedule entered its previous state. This property is not present if the Job Schedule is in its initial active state. */ readonly previousStateTransitionTime?: string; @@ -1488,9 +1688,17 @@ export interface BatchJobSpecificationOutput { displayName?: string; /** Whether Tasks in the Job can define dependencies on each other. The default is false. */ usesTaskDependencies?: boolean; - /** The action the Batch service should take when all Tasks in a Job created under this schedule are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. 
This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. */ + /** + * The action the Batch service should take when all Tasks in a Job created under this schedule are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. + * + * Possible values: "noaction", "terminatejob" + */ onAllTasksComplete?: OnAllBatchTasksCompleteOutput; - /** The action the Batch service should take when any Task fails in a Job created under this schedule. A Task is considered to have failed if it have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. */ + /** + * The action the Batch service should take when any Task fails in a Job created under this schedule. A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. + * + * Possible values: "noaction", "performexitoptionsjobaction" + */ onTaskFailure?: OnBatchTaskFailureOutput; /** The network configuration for the Job. 
*/ networkConfiguration?: BatchJobNetworkConfigurationOutput; @@ -1598,9 +1806,17 @@ export interface ExitCodeMappingOutput { /** Specifies how the Batch service responds to a particular exit condition. */ export interface ExitOptionsOutput { - /** An action to take on the Job containing the Task, if the Task completes with the given exit condition and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The default is none for exit code 0 and terminate for all other exit conditions. If the Job's onTaskFailed property is noaction, then specifying this property returns an error and the add Task request fails with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + /** + * An action to take on the Job containing the Task, if the Task completes with the given exit condition and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The default is none for exit code 0 and terminate for all other exit conditions. If the Job's onTaskFailed property is noaction, then specifying this property returns an error and the add Task request fails with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + * + * Possible values: "none", "disable", "terminate" + */ jobAction?: BatchJobActionOutput; - /** An action that the Batch service performs on Tasks that depend on this Task. Possible values are 'satisfy' (allowing dependent tasks to progress) and 'block' (dependent tasks continue to wait). Batch does not yet support cancellation of dependent tasks. */ + /** + * An action that the Batch service performs on Tasks that depend on this Task. Possible values are 'satisfy' (allowing dependent tasks to progress) and 'block' (dependent tasks continue to wait). Batch does not yet support cancellation of dependent tasks. 
+ * + * Possible values: "satisfy", "block" + */ dependencyAction?: DependencyActionOutput; } @@ -1698,15 +1914,23 @@ export interface BatchTaskOutput { readonly creationTime?: string; /** How the Batch service should respond when the Task completes. */ readonly exitConditions?: ExitConditionsOutput; - /** The current state of the Task. */ + /** + * The current state of the Task. + * + * Possible values: "active", "preparing", "running", "completed" + */ readonly state?: BatchTaskStateOutput; /** The time at which the Task entered its current state. */ readonly stateTransitionTime?: string; - /** The previous state of the Task. This property is not set if the Task is in its initial Active state. */ + /** + * The previous state of the Task. This property is not set if the Task is in its initial Active state. + * + * Possible values: "active", "preparing", "running", "completed" + */ readonly previousState?: BatchTaskStateOutput; /** The time at which the Task entered its previous state. This property is not set if the Task is in its initial Active state. */ readonly previousStateTransitionTime?: string; - /** The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + /** The command line of the Task. 
For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */ readonly commandLine?: string; /** The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ readonly containerSettings?: BatchTaskContainerSettingsOutput; @@ -1760,7 +1984,11 @@ export interface BatchTaskExecutionInfoOutput { requeueCount: number; /** The most recent time at which the Task has been requeued by the Batch service as the result of a user request. This property is set only if the requeueCount is nonzero. */ lastRequeueTime?: string; - /** The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. 
*/ + /** + * The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. + * + * Possible values: "success", "failure" + */ result?: BatchTaskExecutionResultOutput; } @@ -1814,7 +2042,11 @@ export interface BatchTaskAddCollectionResultOutput { /** Result for a single Task added as part of an add Task collection operation. */ export interface BatchTaskAddResultOutput { - /** The status of the add Task request. */ + /** + * The status of the add Task request. + * + * Possible values: "success", "clienterror", "servererror" + */ status: BatchTaskAddStatusOutput; /** The ID of the Task for which this is the result. */ taskId: string; @@ -1852,15 +2084,27 @@ export interface BatchSubtaskOutput { containerInfo?: BatchTaskContainerExecutionInfoOutput; /** Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. */ failureInfo?: BatchTaskFailureInfoOutput; - /** The current state of the subtask. */ + /** + * The current state of the subtask. + * + * Possible values: "preparing", "running", "completed" + */ state?: BatchSubtaskStateOutput; /** The time at which the subtask entered its current state. */ stateTransitionTime?: string; - /** The previous state of the subtask. This property is not set if the subtask is in its initial running state. */ + /** + * The previous state of the subtask. This property is not set if the subtask is in its initial running state. + * + * Possible values: "preparing", "running", "completed" + */ previousState?: BatchSubtaskStateOutput; /** The time at which the subtask entered its previous state. This property is not set if the subtask is in its initial running state. */ previousStateTransitionTime?: string; - /** The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. 
*/ + /** + * The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. + * + * Possible values: "success", "failure" + */ result?: BatchTaskExecutionResultOutput; } @@ -1907,9 +2151,17 @@ export interface BatchNodeOutput { id?: string; /** The URL of the Compute Node. */ url?: string; - /** The current state of the Compute Node. The Spot/Low-priority Compute Node has been preempted. Tasks which were running on the Compute Node when it was preempted will be rescheduled when another Compute Node becomes available. */ + /** + * The current state of the Compute Node. The Spot/Low-priority Compute Node has been preempted. Tasks which were running on the Compute Node when it was preempted will be rescheduled when another Compute Node becomes available. + * + * Possible values: "idle", "rebooting", "reimaging", "running", "unusable", "creating", "starting", "waitingforstarttask", "starttaskfailed", "unknown", "leavingpool", "offline", "preempted", "upgradingos", "deallocated", "deallocating" + */ state?: BatchNodeStateOutput; - /** Whether the Compute Node is available for Task scheduling. */ + /** + * Whether the Compute Node is available for Task scheduling. + * + * Possible values: "enabled", "disabled" + */ schedulingState?: SchedulingStateOutput; /** The time at which the Compute Node entered its current state. */ stateTransitionTime?: string; @@ -1959,7 +2211,11 @@ export interface BatchTaskInfoOutput { taskId?: string; /** The ID of the subtask if the Task is a multi-instance Task. */ subtaskId?: number; - /** The current state of the Task. */ + /** + * The current state of the Task. + * + * Possible values: "active", "preparing", "running", "completed" + */ taskState: BatchTaskStateOutput; /** Information about the execution of the Task. 
*/ executionInfo?: BatchTaskExecutionInfoOutput; @@ -1967,7 +2223,11 @@ export interface BatchTaskInfoOutput { /** Information about a StartTask running on a Compute Node. */ export interface BatchStartTaskInfoOutput { - /** The state of the StartTask on the Compute Node. */ + /** + * The state of the StartTask on the Compute Node. + * + * Possible values: "running", "completed" + */ state: BatchStartTaskStateOutput; /** The time at which the StartTask started running. This value is reset every time the Task is restarted or retried (that is, this is the most recent time at which the StartTask started running). */ startTime: string; @@ -1983,7 +2243,11 @@ export interface BatchStartTaskInfoOutput { retryCount: number; /** The most recent time at which a retry of the Task started running. This element is present only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not. */ lastRetryTime?: string; - /** The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. */ + /** + * The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. + * + * Possible values: "success", "failure" + */ result?: BatchTaskExecutionResultOutput; } @@ -2007,7 +2271,11 @@ export interface BatchNodeEndpointConfigurationOutput { export interface InboundEndpointOutput { /** The name of the endpoint. */ name: string; - /** The protocol of the endpoint. */ + /** + * The protocol of the endpoint. + * + * Possible values: "tcp", "udp" + */ protocol: InboundEndpointProtocolOutput; /** The public IP address of the Compute Node. 
*/ publicIPAddress: string; @@ -2088,7 +2356,11 @@ export interface InstanceViewStatusOutput { code?: string; /** The localized label for the status. */ displayStatus?: string; - /** Level code. */ + /** + * Level code. + * + * Possible values: "Error", "Info", "Warning" + */ level?: StatusLevelTypesOutput; /** The detailed status message. */ message?: string; @@ -2116,6 +2388,8 @@ export type DiskEncryptionTargetOutput = string; export type BatchNodePlacementPolicyTypeOutput = string; /** Alias for DiffDiskPlacementOutput */ export type DiffDiskPlacementOutput = string; +/** Alias for SecurityEncryptionTypesOutput */ +export type SecurityEncryptionTypesOutput = string; /** Alias for SecurityTypesOutput */ export type SecurityTypesOutput = string; /** Alias for DynamicVNetAssignmentScopeOutput */ @@ -2128,6 +2402,8 @@ export type NetworkSecurityGroupRuleAccessOutput = string; export type IpAddressProvisioningTypeOutput = string; /** Alias for ContainerWorkingDirectoryOutput */ export type ContainerWorkingDirectoryOutput = string; +/** Alias for ContainerHostDataPathOutput */ +export type ContainerHostDataPathOutput = string; /** Alias for AutoUserScopeOutput */ export type AutoUserScopeOutput = string; /** Alias for ElevationLevelOutput */ diff --git a/sdk/batch/batch-rest/generated/parameters.ts b/sdk/batch/batch-rest/generated/parameters.ts index 1b5538cab3de..3567b1bf5c96 100644 --- a/sdk/batch/batch-rest/generated/parameters.ts +++ b/sdk/batch/batch-rest/generated/parameters.ts @@ -25,6 +25,8 @@ import { BatchNodeUserCreateContent, BatchNodeUserUpdateContent, BatchNodeRebootContent, + BatchNodeDeallocateContent, + BatchNodeReimageContent, BatchNodeDisableSchedulingContent, UploadBatchServiceLogsContent, } from "./models.js"; @@ -138,7 +140,7 @@ export interface ListPoolUsageMetricsQueryParamProperties { endtime?: Date | string; /** * An OData $filter clause. 
For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. + * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. */ $filter?: string; } @@ -226,7 +228,7 @@ export interface ListPoolsQueryParamProperties { maxresults?: number; /** * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. + * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-pools. */ $filter?: string; /** An OData $select clause. */ @@ -904,7 +906,7 @@ export interface ListSupportedImagesQueryParamProperties { maxresults?: number; /** * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-support-images. */ $filter?: string; } @@ -947,7 +949,7 @@ export interface ListPoolNodeCountsQueryParamProperties { maxresults?: number; /** * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-support-images. */ $filter?: string; } @@ -1007,6 +1009,8 @@ export interface DeleteJobHeaders { export interface DeleteJobQueryParamProperties { /** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */ timeOut?: number; + /** If true, the server will delete the Job even if the corresponding nodes have not fully processed the deletion. The default value is false. 
*/ + force?: boolean; } export interface DeleteJobQueryParam { @@ -1394,6 +1398,8 @@ export interface TerminateJobBodyParam { export interface TerminateJobQueryParamProperties { /** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */ timeOut?: number; + /** If true, the server will terminate the Job even if the corresponding nodes have not fully processed the termination. The default value is false. */ + force?: boolean; } export interface TerminateJobQueryParam { @@ -1486,7 +1492,7 @@ export interface ListJobsQueryParamProperties { maxresults?: number; /** * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. + * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-jobs. */ $filter?: string; /** An OData $select clause. */ @@ -1533,7 +1539,7 @@ export interface ListJobsFromScheduleQueryParamProperties { maxresults?: number; /** * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. + * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. */ $filter?: string; /** An OData $select clause. */ @@ -1580,7 +1586,7 @@ export interface ListJobPreparationAndReleaseTaskStatusQueryParamProperties { maxresults?: number; /** * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. + * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. */ $filter?: string; /** An OData $select clause. 
*/ @@ -1733,6 +1739,8 @@ export interface DeleteJobScheduleHeaders { export interface DeleteJobScheduleQueryParamProperties { /** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */ timeOut?: number; + /** If true, the server will delete the JobSchedule even if the corresponding nodes have not fully processed the deletion. The default value is false. */ + force?: boolean; } export interface DeleteJobScheduleQueryParam { @@ -2103,6 +2111,8 @@ export interface TerminateJobScheduleHeaders { export interface TerminateJobScheduleQueryParamProperties { /** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */ timeOut?: number; + /** If true, the server will terminate the JobSchedule even if the corresponding nodes have not fully processed the termination. The default value is false. */ + force?: boolean; } export interface TerminateJobScheduleQueryParam { @@ -2188,7 +2198,7 @@ export interface ListJobSchedulesQueryParamProperties { maxresults?: number; /** * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. + * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. */ $filter?: string; /** An OData $select clause. */ @@ -2280,7 +2290,7 @@ export interface ListTasksQueryParamProperties { maxresults?: number; /** * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. + * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-tasks. */ $filter?: string; /** An OData $select clause. 
*/ @@ -2843,7 +2853,7 @@ export interface ListTaskFilesQueryParamProperties { maxresults?: number; /** * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. + * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-task-files. */ $filter?: string; /** @@ -3068,6 +3078,129 @@ export type RebootNodeParameters = RebootNodeQueryParam & RebootNodeBodyParam & RequestParameters; +export interface StartNodeHeaders { + /** + * The caller-generated request identity, in the form of a GUID with no decoration + * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + */ + "client-request-id"?: string; + /** Whether the server should return the client-request-id in the response. */ + "return-client-request-id"?: boolean; + /** + * The time the request was issued. Client libraries typically set this to the + * current system clock time; set it explicitly if you are calling the REST API + * directly. + */ + "ocp-date"?: string; +} + +export interface StartNodeQueryParamProperties { + /** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */ + timeOut?: number; +} + +export interface StartNodeQueryParam { + queryParameters?: StartNodeQueryParamProperties; +} + +export interface StartNodeHeaderParam { + headers?: RawHttpHeadersInput & StartNodeHeaders; +} + +export type StartNodeParameters = StartNodeQueryParam & + StartNodeHeaderParam & + RequestParameters; + +export interface DeallocateNodeHeaders { + /** + * The caller-generated request identity, in the form of a GUID with no decoration + * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + */ + "client-request-id"?: string; + /** Whether the server should return the client-request-id in the response. 
*/ + "return-client-request-id"?: boolean; + /** + * The time the request was issued. Client libraries typically set this to the + * current system clock time; set it explicitly if you are calling the REST API + * directly. + */ + "ocp-date"?: string; +} + +export interface DeallocateNodeBodyParam { + /** The options to use for deallocating the Compute Node. */ + body?: BatchNodeDeallocateContent; +} + +export interface DeallocateNodeQueryParamProperties { + /** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */ + timeOut?: number; +} + +export interface DeallocateNodeQueryParam { + queryParameters?: DeallocateNodeQueryParamProperties; +} + +export interface DeallocateNodeHeaderParam { + headers?: RawHttpHeadersInput & DeallocateNodeHeaders; +} + +export interface DeallocateNodeMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; +} + +export type DeallocateNodeParameters = DeallocateNodeQueryParam & + DeallocateNodeHeaderParam & + DeallocateNodeMediaTypesParam & + DeallocateNodeBodyParam & + RequestParameters; + +export interface ReimageNodeHeaders { + /** + * The caller-generated request identity, in the form of a GUID with no decoration + * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + */ + "client-request-id"?: string; + /** Whether the server should return the client-request-id in the response. */ + "return-client-request-id"?: boolean; + /** + * The time the request was issued. Client libraries typically set this to the + * current system clock time; set it explicitly if you are calling the REST API + * directly. + */ + "ocp-date"?: string; +} + +export interface ReimageNodeBodyParam { + /** The options to use for reimaging the Compute Node. 
*/ + body?: BatchNodeReimageContent; +} + +export interface ReimageNodeQueryParamProperties { + /** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */ + timeOut?: number; +} + +export interface ReimageNodeQueryParam { + queryParameters?: ReimageNodeQueryParamProperties; +} + +export interface ReimageNodeHeaderParam { + headers?: RawHttpHeadersInput & ReimageNodeHeaders; +} + +export interface ReimageNodeMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; +} + +export type ReimageNodeParameters = ReimageNodeQueryParam & + ReimageNodeHeaderParam & + ReimageNodeMediaTypesParam & + ReimageNodeBodyParam & + RequestParameters; + export interface DisableNodeSchedulingHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration @@ -3251,7 +3384,7 @@ export interface ListNodesQueryParamProperties { maxresults?: number; /** * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. + * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. */ $filter?: string; /** An OData $select clause. */ @@ -3506,7 +3639,7 @@ export interface ListNodeFilesQueryParamProperties { maxresults?: number; /** * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. + * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. */ $filter?: string; /** Whether to list children of a directory. 
*/ diff --git a/sdk/batch/batch-rest/generated/responses.ts b/sdk/batch/batch-rest/generated/responses.ts index 063e5a51c61f..7647899ac994 100644 --- a/sdk/batch/batch-rest/generated/responses.ts +++ b/sdk/batch/batch-rest/generated/responses.ts @@ -1399,6 +1399,78 @@ export interface RebootNodeDefaultResponse extends HttpResponse { body: BatchErrorOutput; } +export interface StartNode202Headers { + /** The OData ID of the resource to which the request applied. */ + dataserviceid: string; + /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ + etag?: string; + /** The time at which the resource was last modified. */ + "last-modified"?: string; + /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ + "client-request-id"?: string; + /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ + "request-id"?: string; +} + +/** The request has been accepted for processing, but processing has not yet completed. */ +export interface StartNode202Response extends HttpResponse { + status: "202"; + headers: RawHttpHeaders & StartNode202Headers; +} + +export interface StartNodeDefaultResponse extends HttpResponse { + status: string; + body: BatchErrorOutput; +} + +export interface DeallocateNode202Headers { + /** The OData ID of the resource to which the request applied. 
*/ + dataserviceid: string; + /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ + etag?: string; + /** The time at which the resource was last modified. */ + "last-modified"?: string; + /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ + "client-request-id"?: string; + /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ + "request-id"?: string; +} + +/** The request has been accepted for processing, but processing has not yet completed. */ +export interface DeallocateNode202Response extends HttpResponse { + status: "202"; + headers: RawHttpHeaders & DeallocateNode202Headers; +} + +export interface DeallocateNodeDefaultResponse extends HttpResponse { + status: string; + body: BatchErrorOutput; +} + +export interface ReimageNode202Headers { + /** The OData ID of the resource to which the request applied. */ + dataserviceid: string; + /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ + etag?: string; + /** The time at which the resource was last modified. */ + "last-modified"?: string; + /** The client-request-id provided by the client during the request. 
This will be returned only if the return-client-request-id parameter was set to true. */ + "client-request-id"?: string; + /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ + "request-id"?: string; +} + +/** The request has been accepted for processing, but processing has not yet completed. */ +export interface ReimageNode202Response extends HttpResponse { + status: "202"; + headers: RawHttpHeaders & ReimageNode202Headers; +} + +export interface ReimageNodeDefaultResponse extends HttpResponse { + status: string; + body: BatchErrorOutput; +} + export interface DisableNodeScheduling200Headers { /** The OData ID of the resource to which the request applied. 
*/ dataserviceid: string; diff --git a/sdk/batch/batch-rest/package.json b/sdk/batch/batch-rest/package.json index d6023626dfc3..d7bd765b50bf 100644 --- a/sdk/batch/batch-rest/package.json +++ b/sdk/batch/batch-rest/package.json @@ -1,6 +1,6 @@ { "name": "@azure-rest/batch", - "version": "1.0.0-beta.1", + "version": "1.0.0-beta.2", "description": "Batch Service Rest Level Client", "engines": { "node": ">=18.0.0" @@ -61,12 +61,12 @@ "disableDocsMs": true }, "dependencies": { - "@azure-rest/core-client": "^1.4.0", + "@azure-rest/core-client": "^2.1.0", "@azure/core-auth": "^1.6.0", - "@azure/core-paging": "^1.5.0", "@azure/core-rest-pipeline": "^1.16.0", "@azure/logger": "^1.0.0", - "tslib": "^2.6.2" + "tslib": "^2.6.2", + "@azure/core-paging": "^1.5.0" }, "devDependencies": { "@azure-tools/test-credential": "^1.0.0", diff --git a/sdk/batch/batch-rest/review/batch.api.md b/sdk/batch/batch-rest/review/batch.api.md index c753eff05362..bbb7380b65e3 100644 --- a/sdk/batch/batch-rest/review/batch.api.md +++ b/sdk/batch/batch-rest/review/batch.api.md @@ -328,11 +328,13 @@ export interface BatchJobManagerTaskOutput { // @public export interface BatchJobNetworkConfiguration { + skipWithdrawFromVNet: boolean; subnetId: string; } // @public export interface BatchJobNetworkConfigurationOutput { + skipWithdrawFromVNet: boolean; subnetId: string; } @@ -692,6 +694,7 @@ export interface BatchJobUpdateContent { constraints?: BatchJobConstraints; maxParallelTasks?: number; metadata?: Array; + networkConfiguration?: BatchJobNetworkConfiguration; onAllTasksComplete?: OnAllBatchTasksComplete; poolInfo?: BatchPoolInfo; priority?: number; @@ -712,6 +715,8 @@ export type BatchNodeCommunicationModeOutput = string; // @public export interface BatchNodeCountsOutput { creating: number; + deallocated: number; + deallocating: number; idle: number; leavingPool: number; offline: number; @@ -728,6 +733,14 @@ export interface BatchNodeCountsOutput { waitingForStartTask: number; } +// @public +export 
interface BatchNodeDeallocateContent { + nodeDeallocateOption?: BatchNodeDeallocateOption; +} + +// @public +export type BatchNodeDeallocateOption = string; + // @public export type BatchNodeDeallocationOption = string; @@ -857,6 +870,14 @@ export interface BatchNodeRebootContent { // @public export type BatchNodeRebootOption = string; +// @public +export interface BatchNodeReimageContent { + nodeReimageOption?: BatchNodeReimageOption; +} + +// @public +export type BatchNodeReimageOption = string; + // @public export interface BatchNodeRemoteLoginSettingsOutput { remoteLoginIPAddress: string; @@ -1140,9 +1161,20 @@ export interface BatchPoolStatisticsOutput { // @public export interface BatchPoolUpdateContent { applicationPackageReferences?: Array; + displayName?: string; + enableInterNodeCommunication?: boolean; metadata?: Array; + mountConfiguration?: Array; + networkConfiguration?: NetworkConfiguration; + resourceTags?: Record; startTask?: BatchStartTask; targetNodeCommunicationMode?: BatchNodeCommunicationMode; + taskSchedulingPolicy?: BatchTaskSchedulingPolicy; + taskSlotsPerNode?: number; + upgradePolicy?: UpgradePolicy; + userAccounts?: Array; + virtualMachineConfiguration?: VirtualMachineConfiguration; + vmSize?: string; } // @public @@ -1281,6 +1313,7 @@ export interface BatchTaskContainerExecutionInfoOutput { // @public export interface BatchTaskContainerSettings { + containerHostBatchBindMounts?: Array; containerRunOptions?: string; imageName: string; registry?: ContainerRegistryReference; @@ -1289,6 +1322,7 @@ export interface BatchTaskContainerSettings { // @public export interface BatchTaskContainerSettingsOutput { + containerHostBatchBindMounts?: Array; containerRunOptions?: string; imageName: string; registry?: ContainerRegistryReferenceOutput; @@ -1555,6 +1589,24 @@ export interface ContainerConfigurationOutput { type: ContainerTypeOutput; } +// @public +export interface ContainerHostBatchBindMountEntry { + isReadOnly?: boolean; + source?: 
ContainerHostDataPath; +} + +// @public +export interface ContainerHostBatchBindMountEntryOutput { + isReadOnly?: boolean; + source?: ContainerHostDataPathOutput; +} + +// @public +export type ContainerHostDataPath = string; + +// @public +export type ContainerHostDataPathOutput = string; + // @public export interface ContainerRegistryReference { identityReference?: BatchNodeIdentityReference; @@ -2013,6 +2065,73 @@ export interface DataDiskOutput { storageAccountType?: StorageAccountTypeOutput; } +// @public (undocumented) +export interface DeallocateNode { + post(options: DeallocateNodeParameters): StreamableMethod; +} + +// @public (undocumented) +export interface DeallocateNode202Headers { + "client-request-id"?: string; + "last-modified"?: string; + "request-id"?: string; + dataserviceid: string; + etag?: string; +} + +// @public +export interface DeallocateNode202Response extends HttpResponse { + // (undocumented) + headers: RawHttpHeaders & DeallocateNode202Headers; + // (undocumented) + status: "202"; +} + +// @public (undocumented) +export interface DeallocateNodeBodyParam { + body?: BatchNodeDeallocateContent; +} + +// @public (undocumented) +export interface DeallocateNodeDefaultResponse extends HttpResponse { + // (undocumented) + body: BatchErrorOutput; + // (undocumented) + status: string; +} + +// @public (undocumented) +export interface DeallocateNodeHeaderParam { + // (undocumented) + headers?: RawHttpHeadersInput & DeallocateNodeHeaders; +} + +// @public (undocumented) +export interface DeallocateNodeHeaders { + "client-request-id"?: string; + "ocp-date"?: string; + "return-client-request-id"?: boolean; +} + +// @public (undocumented) +export interface DeallocateNodeMediaTypesParam { + contentType: "application/json; odata=minimalmetadata"; +} + +// @public (undocumented) +export type DeallocateNodeParameters = DeallocateNodeQueryParam & DeallocateNodeHeaderParam & DeallocateNodeMediaTypesParam & DeallocateNodeBodyParam & RequestParameters; + +// 
@public (undocumented) +export interface DeallocateNodeQueryParam { + // (undocumented) + queryParameters?: DeallocateNodeQueryParamProperties; +} + +// @public (undocumented) +export interface DeallocateNodeQueryParamProperties { + timeOut?: number; +} + // @public (undocumented) export interface DeleteJob { delete(options?: DeleteJobParameters): StreamableMethod; @@ -2071,6 +2190,7 @@ export interface DeleteJobQueryParam { // @public (undocumented) export interface DeleteJobQueryParamProperties { + force?: boolean; timeOut?: number; } @@ -2124,6 +2244,7 @@ export interface DeleteJobScheduleQueryParam { // @public (undocumented) export interface DeleteJobScheduleQueryParamProperties { + force?: boolean; timeOut?: number; } @@ -3918,8 +4039,10 @@ export interface HttpHeaderOutput { // @public export interface ImageReference { + communityGalleryImageId?: string; offer?: string; publisher?: string; + sharedGalleryImageId?: string; sku?: string; version?: string; virtualMachineImageId?: string; @@ -3927,9 +4050,11 @@ export interface ImageReference { // @public export interface ImageReferenceOutput { + communityGalleryImageId?: string; readonly exactVersion?: string; offer?: string; publisher?: string; + sharedGalleryImageId?: string; sku?: string; version?: string; virtualMachineImageId?: string; @@ -4163,6 +4288,15 @@ export function isUnexpected(response: GetNode200Response | GetNodeDefaultRespon // @public (undocumented) export function isUnexpected(response: RebootNode202Response | RebootNodeDefaultResponse): response is RebootNodeDefaultResponse; +// @public (undocumented) +export function isUnexpected(response: StartNode202Response | StartNodeDefaultResponse): response is StartNodeDefaultResponse; + +// @public (undocumented) +export function isUnexpected(response: DeallocateNode202Response | DeallocateNodeDefaultResponse): response is DeallocateNodeDefaultResponse; + +// @public (undocumented) +export function isUnexpected(response: ReimageNode202Response | 
ReimageNodeDefaultResponse): response is ReimageNodeDefaultResponse; + // @public (undocumented) export function isUnexpected(response: DisableNodeScheduling200Response | DisableNodeSchedulingDefaultResponse): response is DisableNodeSchedulingDefaultResponse; @@ -5182,12 +5316,14 @@ export type LoginModeOutput = string; // @public export interface ManagedDisk { - storageAccountType: StorageAccountType; + securityProfile?: VMDiskSecurityProfile; + storageAccountType?: StorageAccountType; } // @public export interface ManagedDiskOutput { - storageAccountType: StorageAccountTypeOutput; + securityProfile?: VMDiskSecurityProfileOutput; + storageAccountType?: StorageAccountTypeOutput; } // @public @@ -5615,6 +5751,73 @@ export interface RecentBatchJobOutput { url?: string; } +// @public (undocumented) +export interface ReimageNode { + post(options: ReimageNodeParameters): StreamableMethod; +} + +// @public (undocumented) +export interface ReimageNode202Headers { + "client-request-id"?: string; + "last-modified"?: string; + "request-id"?: string; + dataserviceid: string; + etag?: string; +} + +// @public +export interface ReimageNode202Response extends HttpResponse { + // (undocumented) + headers: RawHttpHeaders & ReimageNode202Headers; + // (undocumented) + status: "202"; +} + +// @public (undocumented) +export interface ReimageNodeBodyParam { + body?: BatchNodeReimageContent; +} + +// @public (undocumented) +export interface ReimageNodeDefaultResponse extends HttpResponse { + // (undocumented) + body: BatchErrorOutput; + // (undocumented) + status: string; +} + +// @public (undocumented) +export interface ReimageNodeHeaderParam { + // (undocumented) + headers?: RawHttpHeadersInput & ReimageNodeHeaders; +} + +// @public (undocumented) +export interface ReimageNodeHeaders { + "client-request-id"?: string; + "ocp-date"?: string; + "return-client-request-id"?: boolean; +} + +// @public (undocumented) +export interface ReimageNodeMediaTypesParam { + contentType: 
"application/json; odata=minimalmetadata"; +} + +// @public (undocumented) +export type ReimageNodeParameters = ReimageNodeQueryParam & ReimageNodeHeaderParam & ReimageNodeMediaTypesParam & ReimageNodeBodyParam & RequestParameters; + +// @public (undocumented) +export interface ReimageNodeQueryParam { + // (undocumented) + queryParameters?: ReimageNodeQueryParamProperties; +} + +// @public (undocumented) +export interface ReimageNodeQueryParamProperties { + timeOut?: number; +} + // @public (undocumented) export interface RemoveNodes { post(options: RemoveNodesParameters): StreamableMethod; @@ -6176,6 +6379,9 @@ export interface Routes { (path: "/pools/{poolId}/nodes/{nodeId}/users/{userName}", poolId: string, nodeId: string, userName: string): DeleteNodeUser; (path: "/pools/{poolId}/nodes/{nodeId}", poolId: string, nodeId: string): GetNode; (path: "/pools/{poolId}/nodes/{nodeId}/reboot", poolId: string, nodeId: string): RebootNode; + (path: "/pools/{poolId}/nodes/{nodeId}/start", poolId: string, nodeId: string): StartNode; + (path: "/pools/{poolId}/nodes/{nodeId}/deallocate", poolId: string, nodeId: string): DeallocateNode; + (path: "/pools/{poolId}/nodes/{nodeId}/reimage", poolId: string, nodeId: string): ReimageNode; (path: "/pools/{poolId}/nodes/{nodeId}/disablescheduling", poolId: string, nodeId: string): DisableNodeScheduling; (path: "/pools/{poolId}/nodes/{nodeId}/enablescheduling", poolId: string, nodeId: string): EnableNodeScheduling; (path: "/pools/{poolId}/nodes/{nodeId}/remoteloginsettings", poolId: string, nodeId: string): GetNodeRemoteLoginSettings; @@ -6190,6 +6396,12 @@ export interface Routes { // @public export type SchedulingStateOutput = string; +// @public +export type SecurityEncryptionTypes = string; + +// @public +export type SecurityEncryptionTypesOutput = string; + // @public export interface SecurityProfile { encryptionAtHost: boolean; @@ -6220,6 +6432,63 @@ export interface ServiceArtifactReferenceOutput { id: string; } +// @public 
(undocumented) +export interface StartNode { + post(options?: StartNodeParameters): StreamableMethod; +} + +// @public (undocumented) +export interface StartNode202Headers { + "client-request-id"?: string; + "last-modified"?: string; + "request-id"?: string; + dataserviceid: string; + etag?: string; +} + +// @public +export interface StartNode202Response extends HttpResponse { + // (undocumented) + headers: RawHttpHeaders & StartNode202Headers; + // (undocumented) + status: "202"; +} + +// @public (undocumented) +export interface StartNodeDefaultResponse extends HttpResponse { + // (undocumented) + body: BatchErrorOutput; + // (undocumented) + status: string; +} + +// @public (undocumented) +export interface StartNodeHeaderParam { + // (undocumented) + headers?: RawHttpHeadersInput & StartNodeHeaders; +} + +// @public (undocumented) +export interface StartNodeHeaders { + "client-request-id"?: string; + "ocp-date"?: string; + "return-client-request-id"?: boolean; +} + +// @public (undocumented) +export type StartNodeParameters = StartNodeQueryParam & StartNodeHeaderParam & RequestParameters; + +// @public (undocumented) +export interface StartNodeQueryParam { + // (undocumented) + queryParameters?: StartNodeQueryParamProperties; +} + +// @public (undocumented) +export interface StartNodeQueryParamProperties { + timeOut?: number; +} + // @public export type StatusLevelTypesOutput = string; @@ -6358,6 +6627,7 @@ export interface TerminateJobQueryParam { // @public (undocumented) export interface TerminateJobQueryParamProperties { + force?: boolean; timeOut?: number; } @@ -6419,6 +6689,7 @@ export interface TerminateJobScheduleQueryParam { // @public (undocumented) export interface TerminateJobScheduleQueryParamProperties { + force?: boolean; timeOut?: number; } @@ -6870,6 +7141,16 @@ export interface VirtualMachineInfoOutput { scaleSetVmResourceId?: string; } +// @public +export interface VMDiskSecurityProfile { + securityEncryptionType?: SecurityEncryptionTypes; +} + 
+// @public +export interface VMDiskSecurityProfileOutput { + securityEncryptionType?: SecurityEncryptionTypesOutput; +} + // @public export interface VMExtension { autoUpgradeMinorVersion?: boolean; diff --git a/sdk/batch/batch-rest/src/batchClient.ts b/sdk/batch/batch-rest/src/batchClient.ts index bb10ce5c46fe..3be0f2f8f387 100644 --- a/sdk/batch/batch-rest/src/batchClient.ts +++ b/sdk/batch/batch-rest/src/batchClient.ts @@ -23,10 +23,10 @@ export interface BatchClientOptions extends ClientOptions { export default function createClient( endpointParam: string, credentials: TokenCredential | AzureNamedKeyCredential, - { apiVersion = "2024-02-01.19.0", ...options }: BatchClientOptions = {}, + { apiVersion = "2024-07-01.20.0", ...options }: BatchClientOptions = {}, ): BatchClient { const endpointUrl = options.endpoint ?? options.baseUrl ?? `${endpointParam}`; - const userAgentInfo = `azsdk-js-batch-rest/1.0.0-beta.1`; + const userAgentInfo = `azsdk-js-batch-rest/1.0.0-beta.2`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix ? `${options.userAgentOptions.userAgentPrefix} ${userAgentInfo}` @@ -84,6 +84,7 @@ export default function createClient( // If the credentials are not a TokenCredential, we need to add a policy to handle the shared key auth. 
const client = getClient(endpointUrl, options) as BatchClient; const authPolicy = createBatchSharedKeyCredentialsPolicy(credentials); + addClientApiVersionPolicy(client); client.pipeline.addPolicy(authPolicy); - return addClientApiVersionPolicy(client); + return client; } diff --git a/sdk/batch/batch-rest/src/clientDefinitions.ts b/sdk/batch/batch-rest/src/clientDefinitions.ts index fb7f59e47fdf..b6c701916d3a 100644 --- a/sdk/batch/batch-rest/src/clientDefinitions.ts +++ b/sdk/batch/batch-rest/src/clientDefinitions.ts @@ -60,6 +60,9 @@ import { ReplaceNodeUserParameters, GetNodeParameters, RebootNodeParameters, + StartNodeParameters, + DeallocateNodeParameters, + ReimageNodeParameters, DisableNodeSchedulingParameters, EnableNodeSchedulingParameters, GetNodeRemoteLoginSettingsParameters, @@ -191,6 +194,12 @@ import { GetNodeDefaultResponse, RebootNode202Response, RebootNodeDefaultResponse, + StartNode202Response, + StartNodeDefaultResponse, + DeallocateNode202Response, + DeallocateNodeDefaultResponse, + ReimageNode202Response, + ReimageNodeDefaultResponse, DisableNodeScheduling200Response, DisableNodeSchedulingDefaultResponse, EnableNodeScheduling200Response, @@ -265,7 +274,7 @@ export interface CreatePool { post( options: CreatePoolParameters, ): StreamableMethod; - /** Lists all of the Pools in the specified Account. */ + /** Lists all of the Pools in the specified Account. */ get( options?: ListPoolsParameters, ): StreamableMethod; @@ -777,6 +786,31 @@ export interface RebootNode { ): StreamableMethod; } +export interface StartNode { + /** You can start a Compute Node only if it has been deallocated. */ + post( + options?: StartNodeParameters, + ): StreamableMethod; +} + +export interface DeallocateNode { + /** You can deallocate a Compute Node only if it is in an idle or running state. 
*/ + post( + options: DeallocateNodeParameters, + ): StreamableMethod; +} + +export interface ReimageNode { + /** + * You can reinstall the operating system on a Compute Node only if it is in an + * idle or running state. This API can be invoked only on Pools created with the + * cloud service configuration property. + */ + post( + options: ReimageNodeParameters, + ): StreamableMethod; +} + export interface DisableNodeScheduling { /** * You can disable Task scheduling on a Compute Node only if its current @@ -799,9 +833,8 @@ export interface EnableNodeScheduling { export interface GetNodeRemoteLoginSettings { /** - * Before you can remotely login to a Compute Node using the remote login - * settings, you must create a user Account on the Compute Node. This API can be - * invoked only on Pools created with the virtual machine configuration property. + * Before you can remotely login to a Compute Node using the remote login settings, + * you must create a user Account on the Compute Node. 
*/ get( options?: GetNodeRemoteLoginSettingsParameters, @@ -957,6 +990,16 @@ export interface Routes { (path: "/pools/{poolId}/nodes/{nodeId}", poolId: string, nodeId: string): GetNode; /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/reboot' has methods for the following verbs: post */ (path: "/pools/{poolId}/nodes/{nodeId}/reboot", poolId: string, nodeId: string): RebootNode; + /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/start' has methods for the following verbs: post */ + (path: "/pools/{poolId}/nodes/{nodeId}/start", poolId: string, nodeId: string): StartNode; + /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/deallocate' has methods for the following verbs: post */ + ( + path: "/pools/{poolId}/nodes/{nodeId}/deallocate", + poolId: string, + nodeId: string, + ): DeallocateNode; + /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/reimage' has methods for the following verbs: post */ + (path: "/pools/{poolId}/nodes/{nodeId}/reimage", poolId: string, nodeId: string): ReimageNode; /** Resource for '/pools/\{poolId\}/nodes/\{nodeId\}/disablescheduling' has methods for the following verbs: post */ ( path: "/pools/{poolId}/nodes/{nodeId}/disablescheduling", diff --git a/sdk/batch/batch-rest/src/isUnexpected.ts b/sdk/batch/batch-rest/src/isUnexpected.ts index 9115381aa031..5e6373b0e33c 100644 --- a/sdk/batch/batch-rest/src/isUnexpected.ts +++ b/sdk/batch/batch-rest/src/isUnexpected.ts @@ -120,6 +120,12 @@ import { GetNodeDefaultResponse, RebootNode202Response, RebootNodeDefaultResponse, + StartNode202Response, + StartNodeDefaultResponse, + DeallocateNode202Response, + DeallocateNodeDefaultResponse, + ReimageNode202Response, + ReimageNodeDefaultResponse, DisableNodeScheduling200Response, DisableNodeSchedulingDefaultResponse, EnableNodeScheduling200Response, @@ -203,6 +209,9 @@ const responseMap: Record = { "PUT /pools/{poolId}/nodes/{nodeId}/users/{userName}": ["200"], "GET /pools/{poolId}/nodes/{nodeId}": ["200"], "POST 
/pools/{poolId}/nodes/{nodeId}/reboot": ["202"], + "POST /pools/{poolId}/nodes/{nodeId}/start": ["202"], + "POST /pools/{poolId}/nodes/{nodeId}/deallocate": ["202"], + "POST /pools/{poolId}/nodes/{nodeId}/reimage": ["202"], "POST /pools/{poolId}/nodes/{nodeId}/disablescheduling": ["200"], "POST /pools/{poolId}/nodes/{nodeId}/enablescheduling": ["200"], "GET /pools/{poolId}/nodes/{nodeId}/remoteloginsettings": ["200"], @@ -395,6 +404,15 @@ export function isUnexpected( export function isUnexpected( response: RebootNode202Response | RebootNodeDefaultResponse, ): response is RebootNodeDefaultResponse; +export function isUnexpected( + response: StartNode202Response | StartNodeDefaultResponse, +): response is StartNodeDefaultResponse; +export function isUnexpected( + response: DeallocateNode202Response | DeallocateNodeDefaultResponse, +): response is DeallocateNodeDefaultResponse; +export function isUnexpected( + response: ReimageNode202Response | ReimageNodeDefaultResponse, +): response is ReimageNodeDefaultResponse; export function isUnexpected( response: DisableNodeScheduling200Response | DisableNodeSchedulingDefaultResponse, ): response is DisableNodeSchedulingDefaultResponse; @@ -548,6 +566,12 @@ export function isUnexpected( | GetNodeDefaultResponse | RebootNode202Response | RebootNodeDefaultResponse + | StartNode202Response + | StartNodeDefaultResponse + | DeallocateNode202Response + | DeallocateNodeDefaultResponse + | ReimageNode202Response + | ReimageNodeDefaultResponse | DisableNodeScheduling200Response | DisableNodeSchedulingDefaultResponse | EnableNodeScheduling200Response @@ -629,6 +653,9 @@ export function isUnexpected( | ReplaceNodeUserDefaultResponse | GetNodeDefaultResponse | RebootNodeDefaultResponse + | StartNodeDefaultResponse + | DeallocateNodeDefaultResponse + | ReimageNodeDefaultResponse | DisableNodeSchedulingDefaultResponse | EnableNodeSchedulingDefaultResponse | GetNodeRemoteLoginSettingsDefaultResponse diff --git 
a/sdk/batch/batch-rest/src/models.ts b/sdk/batch/batch-rest/src/models.ts index 1f444d6698a7..8f910053f15c 100644 --- a/sdk/batch/batch-rest/src/models.ts +++ b/sdk/batch/batch-rest/src/models.ts @@ -43,7 +43,11 @@ export interface BatchPoolCreateContent { metadata?: Array; /** Mount storage using specified file system for the entire lifetime of the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system. */ mountConfiguration?: Array; - /** The desired node communication mode for the pool. If omitted, the default value is Default. */ + /** + * The desired node communication mode for the pool. If omitted, the default value is Default. + * + * Possible values: "default", "classic", "simplified" + */ targetNodeCommunicationMode?: BatchNodeCommunicationMode; /** The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. */ upgradePolicy?: UpgradePolicy; @@ -60,7 +64,7 @@ export interface VirtualMachineConfiguration { nodeAgentSKUId: string; /** Windows operating system settings on the virtual machine. This property must not be specified if the imageReference property specifies a Linux OS Image. */ windowsConfiguration?: WindowsConfiguration; - /** The configuration for data disks attached to the Compute Nodes in the Pool. This property must be specified if the Compute Nodes in the Pool need to have empty data disks attached to them. This cannot be updated. Each Compute Node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. When the Compute Node is removed from the Pool, the disk and all data associated with it is also deleted. 
The disk is not formatted after being attached, it must be formatted before use - for more information see https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. */ + /** The configuration for data disks attached to the Compute Nodes in the Pool. This property must be specified if the Compute Nodes in the Pool need to have empty data disks attached to them. This cannot be updated. Each Compute Node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. When the Compute Node is removed from the Pool, the disk and all data associated with it is also deleted. The disk is not formatted after being attached, it must be formatted before use - for more information see https://docs.microsoft.com/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and https://docs.microsoft.com/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. */ dataDisks?: Array; /** * This only applies to Images that contain the Windows operating system, and @@ -106,6 +110,10 @@ export interface ImageReference { version?: string; /** The ARM resource identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool will be created using this Image Id. This is of the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} or /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} for always defaulting to the latest image version. This property is mutually exclusive with other ImageReference properties. 
The Azure Compute Gallery Image must have replicas in the same region and must be in the same subscription as the Azure Batch account. If the image version is not specified in the imageId, the latest version will be used. For information about the firewall settings for the Batch Compute Node agent to communicate with the Batch service see https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ virtualMachineImageId?: string; + /** The shared gallery image unique identifier. This property is mutually exclusive with other properties and can be fetched from shared gallery image GET call. */ + sharedGalleryImageId?: string; + /** The community gallery image unique identifier. This property is mutually exclusive with other properties and can be fetched from community gallery image GET call. */ + communityGalleryImageId?: string; } /** Windows operating system settings to apply to the virtual machine. */ @@ -122,17 +130,29 @@ export interface WindowsConfiguration { export interface DataDisk { /** The logical unit number. The logicalUnitNumber is used to uniquely identify each data disk. If attaching multiple disks, each should have a distinct logicalUnitNumber. The value must be between 0 and 63, inclusive. */ lun: number; - /** The type of caching to be enabled for the data disks. The default value for caching is readwrite. For information about the caching options see: https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. */ + /** + * The type of caching to be enabled for the data disks. The default value for caching is readwrite. For information about the caching options see: https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. + * + * Possible values: "none", "readonly", "readwrite" + */ caching?: CachingType; /** The initial disk size in gigabytes. 
*/ diskSizeGB: number; - /** The storage Account type to be used for the data disk. If omitted, the default is "standard_lrs". */ + /** + * The storage Account type to be used for the data disk. If omitted, the default is "standard_lrs". + * + * Possible values: "standard_lrs", "premium_lrs", "standardssd_lrs" + */ storageAccountType?: StorageAccountType; } /** The configuration for container-enabled Pools. */ export interface ContainerConfiguration { - /** The container technology to be used. */ + /** + * The container technology to be used. + * + * Possible values: "dockerCompatible", "criCompatible" + */ type: ContainerType; /** The collection of container Image names. This is the full Image reference, as would be specified to "docker pull". An Image will be sourced from the default Docker registry unless the Image is fully qualified with an alternative registry. */ containerImageNames?: string[]; @@ -167,7 +187,7 @@ export interface BatchNodeIdentityReference { * Azure Compute Gallery Image. */ export interface DiskEncryptionConfiguration { - /** The list of disk targets Batch Service will encrypt on the compute node. If omitted, no disks on the compute nodes in the pool will be encrypted. On Linux pool, only "TemporaryDisk" is supported; on Windows pool, "OsDisk" and "TemporaryDisk" must be specified. */ + /** The list of disk targets Batch Service will encrypt on the compute node. If omitted, no disks on the compute nodes in the pool will be encrypted. */ targets?: DiskEncryptionTarget[]; } @@ -177,7 +197,11 @@ export interface DiskEncryptionConfiguration { * with best effort balancing. */ export interface BatchNodePlacementConfiguration { - /** Node placement Policy type on Batch Pools. Allocation policy used by Batch Service to provision the nodes. If not specified, Batch will use the regional policy. */ + /** + * Node placement Policy type on Batch Pools. Allocation policy used by Batch Service to provision the nodes. 
If not specified, Batch will use the regional policy. + * + * Possible values: "Shared", "Startup", "VfsMounts", "Task", "JobPrep", "Applications" + */ policy?: BatchNodePlacementPolicyType; } @@ -207,7 +231,11 @@ export interface VMExtension { export interface OSDisk { /** Specifies the ephemeral Disk Settings for the operating system disk used by the compute node (VM). */ ephemeralOSDiskSettings?: DiffDiskSettings; - /** Specifies the caching requirements. Possible values are: None, ReadOnly, ReadWrite. The default values are: None for Standard storage. ReadOnly for Premium storage. */ + /** + * Specifies the caching requirements. Possible values are: None, ReadOnly, ReadWrite. The default values are: None for Standard storage. ReadOnly for Premium storage. + * + * Possible values: "none", "readonly", "readwrite" + */ caching?: CachingType; /** The initial disk size in GB when creating new OS disk. */ diskSizeGB?: number; @@ -222,21 +250,45 @@ export interface OSDisk { * compute node (VM). */ export interface DiffDiskSettings { - /** Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This property can be used by user in the request to choose the location e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. */ + /** + * Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This property can be used by user in the request to choose the location e.g., cache disk space for Ephemeral OS disk provisioning. 
For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://docs.microsoft.com/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://docs.microsoft.com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. + * + * Possible values: "cachedisk" + */ placement?: DiffDiskPlacement; } /** The managed disk parameters. */ export interface ManagedDisk { - /** The storage account type for managed disk. */ - storageAccountType: StorageAccountType; + /** + * The storage account type for managed disk. + * + * Possible values: "standard_lrs", "premium_lrs", "standardssd_lrs" + */ + storageAccountType?: StorageAccountType; + /** Specifies the security profile settings for the managed disk. */ + securityProfile?: VMDiskSecurityProfile; +} + +/** Specifies the security profile settings for the managed disk. **Note**: It can only be set for Confidential VMs and required when using Confidential VMs. */ +export interface VMDiskSecurityProfile { + /** + * Specifies the EncryptionType of the managed disk. It is set to VMGuestStateOnly for encryption of just the VMGuestState blob, and NonPersistedTPM for not persisting firmware state in the VMGuestState blob. **Note**: It can be set for only Confidential VMs and is required when using Confidential VMs. + * + * Possible values: "NonPersistedTPM", "VMGuestStateOnly" + */ + securityEncryptionType?: SecurityEncryptionTypes; } /** Specifies the security profile settings for the virtual machine or virtual machine scale set. */ export interface SecurityProfile { - /** This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself. 
*/ + /** This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself. For more information on encryption at host requirements, please refer to https://learn.microsoft.com/azure/virtual-machines/disk-encryption#supported-vm-sizes. */ encryptionAtHost: boolean; - /** Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. */ + /** + * Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. + * + * Possible values: "trustedLaunch", "confidentialVM" + */ securityType: SecurityTypes; /** Specifies the security settings like secure boot and vTPM used while creating the virtual machine. Specifies the security settings like secure boot and vTPM used while creating the virtual machine. */ uefiSettings: UefiSettings; @@ -261,13 +313,17 @@ export interface ServiceArtifactReference { /** The network configuration for a Pool. */ export interface NetworkConfiguration { - /** The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. 
The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. For Pools created with virtualMachineConfiguration only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication. For Pools created with a virtual machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Also enable outbound connections to Azure Storage on port 443. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ + /** The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. 
Only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication. Enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Also enable outbound connections to Azure Storage on port 443. For more details see: https://docs.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ subnetId?: string; - /** The scope of dynamic vnet assignment. */ + /** + * The scope of dynamic vnet assignment. + * + * Possible values: "none", "job" + */ dynamicVNetAssignmentScope?: DynamicVNetAssignmentScope; - /** The configuration for endpoints on Compute Nodes in the Batch Pool. Pool endpoint configuration is only supported on Pools with the virtualMachineConfiguration property. */ + /** The configuration for endpoints on Compute Nodes in the Batch Pool. */ endpointConfiguration?: BatchPoolEndpointConfiguration; - /** The Public IPAddress configuration for Compute Nodes in the Batch Pool. Public IP configuration property is only supported on Pools with the virtualMachineConfiguration property. */ + /** The Public IPAddress configuration for Compute Nodes in the Batch Pool. */ publicIPAddressConfiguration?: PublicIpAddressConfiguration; /** Whether this pool should enable accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, which may lead to improved networking performance. For more details, see: https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. */ enableAcceleratedNetworking?: boolean; @@ -286,7 +342,11 @@ export interface BatchPoolEndpointConfiguration { export interface InboundNatPool { /** The name of the endpoint. The name must be unique within a Batch Pool, can contain letters, numbers, underscores, periods, and hyphens. 
Names must start with a letter or number, must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values are provided the request fails with HTTP status code 400. */ name: string; - /** The protocol of the endpoint. */ + /** + * The protocol of the endpoint. + * + * Possible values: "tcp", "udp" + */ protocol: InboundEndpointProtocol; /** The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400. */ backendPort: number; @@ -302,7 +362,11 @@ export interface InboundNatPool { export interface NetworkSecurityGroupRule { /** The priority for this rule. Priorities within a Pool must be unique and are evaluated in order of priority. The lower the number the higher the priority. For example, rules could be specified with order numbers of 150, 250, and 350. The rule with the order number of 150 takes precedence over the rule that has an order of 250. Allowed priorities are 150 to 4096. If any reserved or duplicate values are provided the request fails with HTTP status code 400. */ priority: number; - /** The action that should be taken for a specified IP address, subnet range or tag. */ + /** + * The action that should be taken for a specified IP address, subnet range or tag. + * + * Possible values: "allow", "deny" + */ access: NetworkSecurityGroupRuleAccess; /** The source address prefix or tag to match for the rule. Valid values are a single IP address (i.e. 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, or * (for all addresses). If any other values are provided the request fails with HTTP status code 400. */ sourceAddressPrefix: string; @@ -312,7 +376,11 @@ export interface NetworkSecurityGroupRule { /** The public IP Address configuration of the networking configuration of a Pool. 
*/ export interface PublicIpAddressConfiguration { - /** The provisioning type for Public IP Addresses for the Pool. The default value is BatchManaged. */ + /** + * The provisioning type for Public IP Addresses for the Pool. The default value is BatchManaged. + * + * Possible values: "batchmanaged", "usermanaged", "nopublicipaddresses" + */ provision?: IpAddressProvisioningType; /** The list of public IPs which the Batch service will use when provisioning Compute Nodes. The number of IPs specified here limits the maximum size of the Pool - 100 dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public IP. For example, a pool needing 250 dedicated VMs would need at least 3 public IPs specified. Each element of this collection is of the form: /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. */ ipAddressIds?: string[]; @@ -334,7 +402,7 @@ export interface PublicIpAddressConfiguration { * block Batch from being able to re-run the StartTask. */ export interface BatchStartTask { - /** The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + /** The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. 
If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */ commandLine: string; /** The settings for the container under which the StartTask runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ containerSettings?: BatchTaskContainerSettings; @@ -358,8 +426,26 @@ export interface BatchTaskContainerSettings { imageName: string; /** The private registry which contains the container Image. This setting can be omitted if was already provided at Pool creation. */ registry?: ContainerRegistryReference; - /** The location of the container Task working directory. The default is 'taskWorkingDirectory'. */ + /** + * The location of the container Task working directory. The default is 'taskWorkingDirectory'. + * + * Possible values: "taskWorkingDirectory", "containerImageDefault" + */ workingDirectory?: ContainerWorkingDirectory; + /** The paths you want to mount into the container task. If this array is null or not present, the container task will mount the entire temporary disk drive in Windows (or AZ_BATCH_NODE_ROOT_DIR in Linux). It won't mount any data paths into the container if this array is set as empty.
*/ + containerHostBatchBindMounts?: Array; +} + +/** The entry of the path and mount mode you want to mount into the task container. */ +export interface ContainerHostBatchBindMountEntry { + /** + * The path that will be mounted into the container; the customer can select it from the supported data paths. + * + * Possible values: "regional", "zonal" + */ + source?: ContainerHostDataPath; + /** Whether to mount this source path in read-only mode. Default value is false (read/write mode). For Linux, if you mount this path in read/write mode, this does not mean that all users in the container have read/write access to the path; it depends on the access in the host VM. If this path is mounted read-only, all users within the container will not be able to modify the path. */ + isReadOnly?: boolean; } /** A single file or multiple files to be downloaded to a Compute Node. */ @@ -398,9 +484,17 @@ export interface UserIdentity { /** Specifies the options for the auto user that runs an Azure Batch Task. */ export interface AutoUserSpecification { - /** The scope for the auto user. The default value is pool. If the pool is running Windows, a value of Task should be specified if stricter isolation between tasks is required, such as if the task mutates the registry in a way which could impact other tasks. */ + /** + * The scope for the auto user. The default value is pool. If the pool is running Windows, a value of Task should be specified if stricter isolation between tasks is required, such as if the task mutates the registry in a way which could impact other tasks. + * + * Possible values: "task", "pool" + */ scope?: AutoUserScope; - /** The elevation level of the auto user. The default value is nonAdmin. */ + /** + * The elevation level of the auto user. The default value is nonAdmin. + * + * Possible values: "nonadmin", "admin" + */ elevationLevel?: ElevationLevel; } @@ -414,7 +508,11 @@ export interface BatchApplicationPackageReference { /** Specifies how Tasks should be distributed across Compute Nodes.
*/ export interface BatchTaskSchedulingPolicy { - /** How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. */ + /** + * How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. + * + * Possible values: "spread", "pack" + */ nodeFillType: BatchNodeFillType; } @@ -427,7 +525,11 @@ export interface UserAccount { name: string; /** The password for the user Account. */ password: string; - /** The elevation level of the user Account. The default value is nonAdmin. */ + /** + * The elevation level of the user Account. The default value is nonAdmin. + * + * Possible values: "nonadmin", "admin" + */ elevationLevel?: ElevationLevel; /** The Linux-specific user configuration for the user Account. This property is ignored if specified on a Windows Pool. If not specified, the user is created with the default options. */ linuxUserConfiguration?: LinuxUserConfiguration; @@ -447,7 +549,11 @@ export interface LinuxUserConfiguration { /** Properties used to create a user Account on a Windows Compute Node. */ export interface WindowsUserConfiguration { - /** The login mode for the user. The default value for VirtualMachineConfiguration Pools is 'batch'. */ + /** + * The login mode for the user. The default is 'batch'. + * + * Possible values: "batch", "interactive" + */ loginMode?: LoginMode; } @@ -532,11 +638,15 @@ export interface AzureFileShareConfiguration { /** Describes an upgrade policy - automatic, manual, or rolling. */ export interface UpgradePolicy { - /** Specifies the mode of an upgrade to virtual machines in the scale set.

Possible values are:

**Manual** - You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade action.

**Automatic** - All virtual machines in the scale set are automatically updated at the same time.

**Rolling** - Scale set performs updates in batches with an optional pause time in between. */ + /** + * Specifies the mode of an upgrade to virtual machines in the scale set.

Possible values are:

**Manual** - You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade action.

**Automatic** - All virtual machines in the scale set are automatically updated at the same time.

**Rolling** - Scale set performs updates in batches with an optional pause time in between. + * + * Possible values: "automatic", "manual", "rolling" + */ mode: UpgradeMode; /** Configuration parameters used for performing automatic OS Upgrade. The configuration parameters used for performing automatic OS upgrade. */ automaticOSUpgradePolicy?: AutomaticOsUpgradePolicy; - /** The configuration parameters used while performing a rolling upgrade. This property is only supported on Pools with the virtualMachineConfiguration property. */ + /** The configuration parameters used while performing a rolling upgrade. */ rollingUpgradePolicy?: RollingUpgradePolicy; } @@ -544,7 +654,7 @@ export interface UpgradePolicy { export interface AutomaticOsUpgradePolicy { /** Whether OS image rollback feature should be disabled. */ disableAutomaticRollback?: boolean; - /** Indicates whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available.

If this is set to true for Windows based pools, [WindowsConfiguration.enableAutomaticUpdates](https://learn.microsoft.com/en-us/rest/api/batchservice/pool/add?tabs=HTTP#windowsconfiguration) cannot be set to true. */ + /** Indicates whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available.

If this is set to true for Windows based pools, [WindowsConfiguration.enableAutomaticUpdates](https://learn.microsoft.com/rest/api/batchservice/pool/add?tabs=HTTP#windowsconfiguration) cannot be set to true. */ enableAutomaticOSUpgrade?: boolean; /** Indicates whether rolling upgrade policy should be used during Auto OS Upgrade. Auto OS Upgrade will fallback to the default policy if no policy is defined on the VMSS. */ useRollingUpgradePolicy?: boolean; @@ -580,19 +690,45 @@ export interface NameValuePair { /** Parameters for updating an Azure Batch Pool. */ export interface BatchPoolUpdateContent { + /** The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. This field can be updated only when the pool is empty. */ + displayName?: string; + /** The size of virtual machines in the Pool. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).

This field can be updated only when the pool is empty. */ + vmSize?: string; + /** Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false.

This field can be updated only when the pool is empty. */ + enableInterNodeCommunication?: boolean; /** A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is left unchanged. */ startTask?: BatchStartTask; /** A list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. If this element is present, it replaces any existing Package references. If you specify an empty collection, then all Package references are removed from the Pool. If omitted, any existing Package references are left unchanged. */ applicationPackageReferences?: Array; /** A list of name-value pairs associated with the Pool as metadata. If this element is present, it replaces any existing metadata configured on the Pool. If you specify an empty collection, any metadata is removed from the Pool. If omitted, any existing metadata is left unchanged. */ metadata?: Array; - /** The desired node communication mode for the pool. If this element is present, it replaces the existing targetNodeCommunicationMode configured on the Pool. If omitted, any existing metadata is left unchanged. */ + /** The virtual machine configuration for the Pool. This property must be specified.

This field can be updated only when the pool is empty. */ + virtualMachineConfiguration?: VirtualMachineConfiguration; + /** + * The desired node communication mode for the pool. If this element is present, it replaces the existing targetNodeCommunicationMode configured on the Pool. If omitted, any existing metadata is left unchanged. + * + * Possible values: "default", "classic", "simplified" + */ targetNodeCommunicationMode?: BatchNodeCommunicationMode; + /** The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256.

This field can be updated only when the pool is empty. */ + taskSlotsPerNode?: number; + /** How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread.

This field can be updated only when the pool is empty. */ + taskSchedulingPolicy?: BatchTaskSchedulingPolicy; + /** The network configuration for the Pool. This field can be updated only when the pool is empty. */ + networkConfiguration?: NetworkConfiguration; + /** The user-specified tags associated with the pool. The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'.

This field can be updated only when the pool is empty. */ + resourceTags?: Record; + /** The list of user Accounts to be created on each Compute Node in the Pool. This field can be updated only when the pool is empty. */ + userAccounts?: Array; + /** Mount storage using specified file system for the entire lifetime of the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system.

This field can be updated only when the pool is empty. */ + mountConfiguration?: Array; + /** The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling.

This field can be updated only when the pool is empty. */ + upgradePolicy?: UpgradePolicy; } /** Parameters for enabling automatic scaling on an Azure Batch Pool. */ export interface BatchPoolEnableAutoScaleContent { - /** The formula for the desired number of Compute Nodes in the Pool. The formula is checked for validity before it is applied to the Pool. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). */ + /** The formula for the desired number of Compute Nodes in the Pool. The formula is checked for validity before it is applied to the Pool. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling). */ autoScaleFormula?: string; /** The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If you specify a new interval, then the existing autoscale evaluation schedule will be stopped and a new autoscale evaluation schedule will be started, with its starting time being the time when this request was issued.
*/ autoScaleEvaluationInterval?: string; @@ -600,7 +736,7 @@ export interface BatchPoolEnableAutoScaleContent { /** Parameters for evaluating an automatic scaling formula on an Azure Batch Pool. */ export interface BatchPoolEvaluateAutoScaleContent { - /** The formula for the desired number of Compute Nodes in the Pool. The formula is validated and its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a Pool'. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). */ + /** The formula for the desired number of Compute Nodes in the Pool. The formula is validated and its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a Pool'. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling). */ autoScaleFormula: string; } @@ -612,7 +748,11 @@ export interface BatchPoolResizeContent { targetLowPriorityNodes?: number; /** The timeout for allocation of Nodes to the Pool or removal of Compute Nodes from the Pool. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ resizeTimeout?: string; - /** Determines what to do with a Compute Node and its running task(s) if the Pool size is decreasing. The default value is requeue. */ + /** + * Determines what to do with a Compute Node and its running task(s) if the Pool size is decreasing. The default value is requeue. 
+ * + * Possible values: "requeue", "terminate", "taskcompletion", "retaineddata" + */ nodeDeallocationOption?: BatchNodeDeallocationOption; } @@ -624,7 +764,11 @@ export interface BatchPoolReplaceContent { applicationPackageReferences: Array; /** A list of name-value pairs associated with the Pool as metadata. This list replaces any existing metadata configured on the Pool. If omitted, or if you specify an empty collection, any existing metadata is removed from the Pool. */ metadata: Array; - /** The desired node communication mode for the pool. This setting replaces any existing targetNodeCommunication setting on the Pool. If omitted, the existing setting is default. */ + /** + * The desired node communication mode for the pool. This setting replaces any existing targetNodeCommunication setting on the Pool. If omitted, the existing setting is default. + * + * Possible values: "default", "classic", "simplified" + */ targetNodeCommunicationMode?: BatchNodeCommunicationMode; } @@ -634,7 +778,11 @@ export interface BatchNodeRemoveContent { nodeList: string[]; /** The timeout for removal of Compute Nodes to the Pool. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ resizeTimeout?: string; - /** Determines what to do with a Compute Node and its running task(s) after it has been selected for deallocation. The default value is requeue. */ + /** + * Determines what to do with a Compute Node and its running task(s) after it has been selected for deallocation. The default value is requeue. + * + * Possible values: "requeue", "terminate", "taskcompletion", "retaineddata" + */ nodeDeallocationOption?: BatchNodeDeallocationOption; } @@ -650,7 +798,11 @@ export interface BatchJob { constraints?: BatchJobConstraints; /** The Pool settings associated with the Job. 
*/ poolInfo: BatchPoolInfo; - /** The action the Batch service should take when all Tasks in the Job are in the completed state. The default is noaction. */ + /** + * The action the Batch service should take when all Tasks in the Job are in the completed state. The default is noaction. + * + * Possible values: "noaction", "terminatejob" + */ onAllTasksComplete?: OnAllBatchTasksComplete; /** A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */ metadata?: Array; @@ -694,7 +846,7 @@ export interface BatchJobManagerTask { id: string; /** The display name of the Job Manager Task. It need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ displayName?: string; - /** The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + /** The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. 
If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */ commandLine: string; /** The settings for the container under which the Job Manager Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ containerSettings?: BatchTaskContainerSettings; @@ -755,7 +907,7 @@ export interface OutputFileBlobContainerDestination { containerUrl: string; /** The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container. */ identityReference?: BatchNodeIdentityReference; - /** A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types. */ + /** A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: https://docs.microsoft.com/rest/api/storageservices/put-blob#request-headers-all-blob-types. 
*/ uploadHeaders?: Array; } @@ -772,7 +924,11 @@ export interface HttpHeader { * to perform the upload. */ export interface OutputFileUploadConfig { - /** The conditions under which the Task output file or set of files should be uploaded. The default is taskcompletion. */ + /** + * The conditions under which the Task output file or set of files should be uploaded. The default is taskcompletion. + * + * Possible values: "tasksuccess", "taskfailure", "taskcompletion" + */ uploadCondition: OutputFileUploadCondition; } @@ -826,7 +982,7 @@ export interface AuthenticationTokenSettings { export interface BatchJobPreparationTask { /** A string that uniquely identifies the Job Preparation Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobpreparation'. No other Task in the Job can have the same ID as the Job Preparation Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). */ id?: string; - /** The command line of the Job Preparation Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + /** The command line of the Job Preparation Task. 
The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */ commandLine: string; /** The settings for the container under which the Job Preparation Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ containerSettings?: BatchTaskContainerSettings; @@ -865,7 +1021,7 @@ export interface BatchJobPreparationTask { export interface BatchJobReleaseTask { /** A string that uniquely identifies the Job Release Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobrelease'. No other Task in the Job can have the same ID as the Job Release Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). */ id?: string; - /** The command line of the Job Release Task. 
The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + /** The command line of the Job Release Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */ commandLine: string; /** The settings for the container under which the Job Release Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ containerSettings?: BatchTaskContainerSettings; @@ -896,7 +1052,11 @@ export interface BatchPoolInfo { export interface BatchAutoPoolSpecification { /** A prefix to be added to the unique identifier when a Pool is automatically created. 
The Batch service assigns each auto Pool a unique identifier on creation. To distinguish between Pools created for different purposes, you can specify this element to add a prefix to the ID that is assigned. The prefix can be up to 20 characters long. */ autoPoolIdPrefix?: string; - /** The minimum lifetime of created auto Pools, and how multiple Jobs on a schedule are assigned to Pools. */ + /** + * The minimum lifetime of created auto Pools, and how multiple Jobs on a schedule are assigned to Pools. + * + * Possible values: "jobschedule", "job" + */ poolLifetimeOption: BatchPoolLifetimeOption; /** Whether to keep an auto Pool alive after its lifetime expires. If false, the Batch service deletes the Pool once its lifetime (as determined by the poolLifetimeOption setting) expires; that is, when the Job or Job Schedule completes. If true, the Batch service does not delete the Pool automatically. It is up to the user to delete auto Pools created with this option. */ keepAlive?: boolean; @@ -910,7 +1070,7 @@ export interface BatchPoolSpecification { displayName?: string; /** The size of the virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). */ vmSize: string; - /** The virtual machine configuration for the Pool. This property must be specified if the Pool needs to be created with Azure IaaS VMs. If it is not specified then the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + /** The virtual machine configuration for the Pool. This property must be specified. */ virtualMachineConfiguration?: VirtualMachineConfiguration; /** The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. 
The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. */ taskSlotsPerNode?: number; @@ -944,7 +1104,11 @@ export interface BatchPoolSpecification { metadata?: Array; /** A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. */ mountConfiguration?: Array; - /** The desired node communication mode for the pool. If omitted, the default value is Default. */ + /** + * The desired node communication mode for the pool. If omitted, the default value is Default. + * + * Possible values: "default", "classic", "simplified" + */ targetNodeCommunicationMode?: BatchNodeCommunicationMode; /** The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. */ upgradePolicy?: UpgradePolicy; @@ -952,8 +1116,10 @@ export interface BatchPoolSpecification { /** The network configuration for the Job. */ export interface BatchJobNetworkConfiguration { - /** The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. This will only work with a VirtualMachineConfiguration Pool. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. 
This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ + /** The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. 
For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://docs.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ subnetId: string; + /** Whether to withdraw Compute Nodes from the virtual network to DNC when the job is terminated or deleted. If true, nodes will remain joined to the virtual network to DNC. If false, nodes will automatically withdraw when the job ends. Defaults to false. */ + skipWithdrawFromVNet: boolean; } /** Contains information about the execution of a Job in the Azure Batch service. */ @@ -972,7 +1138,11 @@ export interface BatchJobExecutionInfo { /** An error encountered by the Batch service when scheduling a Job. */ export interface BatchJobSchedulingError { - /** The category of the Job scheduling error. */ + /** + * The category of the Job scheduling error. + * + * Possible values: "usererror", "servererror" + */ category: ErrorCategory; /** An identifier for the Job scheduling error. Codes are invariant and are intended to be consumed programmatically. */ code?: string; @@ -1026,15 +1196,25 @@ export interface BatchJobUpdateContent { constraints?: BatchJobConstraints; /** The Pool on which the Batch service runs the Job's Tasks. You may change the Pool for a Job only when the Job is disabled. The Patch Job call will fail if you include the poolInfo element and the Job is not disabled. If you specify an autoPoolSpecification in the poolInfo, only the keepAlive property of the autoPoolSpecification can be updated, and then only if the autoPoolSpecification has a poolLifetimeOption of Job (other job properties can be updated as normal). If omitted, the Job continues to run on its current Pool. 
*/ poolInfo?: BatchPoolInfo; - /** The action the Batch service should take when all Tasks in the Job are in the completed state. If omitted, the completion behavior is left unchanged. You may not change the value from terminatejob to noaction - that is, once you have engaged automatic Job termination, you cannot turn it off again. If you try to do this, the request fails with an 'invalid property value' error response; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + /** + * The action the Batch service should take when all Tasks in the Job are in the completed state. If omitted, the completion behavior is left unchanged. You may not change the value from terminatejob to noaction - that is, once you have engaged automatic Job termination, you cannot turn it off again. If you try to do this, the request fails with an 'invalid property value' error response; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + * + * Possible values: "noaction", "terminatejob" + */ onAllTasksComplete?: OnAllBatchTasksComplete; /** A list of name-value pairs associated with the Job as metadata. If omitted, the existing Job metadata is left unchanged. */ metadata?: Array; + /** The network configuration for the Job. */ + networkConfiguration?: BatchJobNetworkConfiguration; } /** Parameters for disabling an Azure Batch Job. */ export interface BatchJobDisableContent { - /** What to do with active Tasks associated with the Job. */ + /** + * What to do with active Tasks associated with the Job. + * + * Possible values: "requeue", "terminate", "wait" + */ disableTasks: DisableBatchJobOption; } @@ -1070,9 +1250,17 @@ export interface BatchJobCreateContent { commonEnvironmentSettings?: Array; /** The Pool on which the Batch service runs the Job's Tasks. */ poolInfo: BatchPoolInfo; - /** The action the Batch service should take when all Tasks in the Job are in the completed state. 
Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. */ + /** + * The action the Batch service should take when all Tasks in the Job are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. + * + * Possible values: "noaction", "terminatejob" + */ onAllTasksComplete?: OnAllBatchTasksComplete; - /** The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. */ + /** + * The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. + * + * Possible values: "noaction", "performexitoptionsjobaction" + */ onTaskFailure?: OnBatchTaskFailure; /** The network configuration for the Job. 
*/ networkConfiguration?: BatchJobNetworkConfiguration; @@ -1092,7 +1280,11 @@ export interface BatchTaskContainerExecutionInfo { /** Information about a Task failure. */ export interface BatchTaskFailureInfo { - /** The category of the Task error. */ + /** + * The category of the Task error. + * + * Possible values: "usererror", "servererror" + */ category: ErrorCategory; /** An identifier for the Task error. Codes are invariant and are intended to be consumed programmatically. */ code?: string; @@ -1142,9 +1334,17 @@ export interface BatchJobSpecification { displayName?: string; /** Whether Tasks in the Job can define dependencies on each other. The default is false. */ usesTaskDependencies?: boolean; - /** The action the Batch service should take when all Tasks in a Job created under this schedule are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. */ + /** + * The action the Batch service should take when all Tasks in a Job created under this schedule are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. + * + * Possible values: "noaction", "terminatejob" + */ onAllTasksComplete?: OnAllBatchTasksComplete; - /** The action the Batch service should take when any Task fails in a Job created under this schedule. 
A Task is considered to have failed if it have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. */ + /** + * The action the Batch service should take when any Task fails in a Job created under this schedule. A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. + * + * Possible values: "noaction", "performexitoptionsjobaction" + */ onTaskFailure?: OnBatchTaskFailure; /** The network configuration for the Job. */ networkConfiguration?: BatchJobNetworkConfiguration; @@ -1304,9 +1504,17 @@ export interface ExitCodeMapping { /** Specifies how the Batch service responds to a particular exit condition. */ export interface ExitOptions { - /** An action to take on the Job containing the Task, if the Task completes with the given exit condition and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The default is none for exit code 0 and terminate for all other exit conditions. If the Job's onTaskFailed property is noaction, then specifying this property returns an error and the add Task request fails with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + /** + * An action to take on the Job containing the Task, if the Task completes with the given exit condition and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The default is none for exit code 0 and terminate for all other exit conditions.
If the Job's onTaskFailed property is noaction, then specifying this property returns an error and the add Task request fails with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + * + * Possible values: "none", "disable", "terminate" + */ jobAction?: BatchJobAction; - /** An action that the Batch service performs on Tasks that depend on this Task. Possible values are 'satisfy' (allowing dependent tasks to progress) and 'block' (dependent tasks continue to wait). Batch does not yet support cancellation of dependent tasks. */ + /** + * An action that the Batch service performs on Tasks that depend on this Task. Possible values are 'satisfy' (allowing dependent tasks to progress) and 'block' (dependent tasks continue to wait). Batch does not yet support cancellation of dependent tasks. + * + * Possible values: "satisfy", "block" + */ dependencyAction?: DependencyAction; } @@ -1406,7 +1614,11 @@ export interface BatchTaskExecutionInfo { requeueCount: number; /** The most recent time at which the Task has been requeued by the Batch service as the result of a user request. This property is set only if the requeueCount is nonzero. */ lastRequeueTime?: Date | string; - /** The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. */ + /** + * The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. + * + * Possible values: "success", "failure" + */ result?: BatchTaskExecutionResult; } @@ -1466,7 +1678,7 @@ export interface BatchNodeUserCreateContent { isAdmin?: boolean; /** The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. */ expiryTime?: Date | string; - /** The password of the Account. 
The password is required for Windows Compute Nodes (those created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. */ + /** The password of the Account. The password is required for Windows Compute Nodes. For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. */ password?: string; /** The SSH public key that can be used for remote login to the Compute Node. The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux Compute Nodes. If this is specified for a Windows Compute Node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ sshPublicKey?: string; @@ -1474,7 +1686,7 @@ export interface BatchNodeUserCreateContent { /** Parameters for updating a user account for RDP or SSH access on an Azure Batch Compute Node. */ export interface BatchNodeUserUpdateContent { - /** The password of the Account. The password is required for Windows Compute Nodes (those created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. If omitted, any existing password is removed. */ + /** The password of the Account. The password is required for Windows Compute Nodes. For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. If omitted, any existing password is removed. */ password?: string; /** The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. 
*/ expiryTime?: Date | string; @@ -1484,13 +1696,41 @@ export interface BatchNodeUserUpdateContent { /** Parameters for rebooting an Azure Batch Compute Node. */ export interface BatchNodeRebootContent { - /** When to reboot the Compute Node and what to do with currently running Tasks. The default value is requeue. */ + /** + * When to reboot the Compute Node and what to do with currently running Tasks. The default value is requeue. + * + * Possible values: "requeue", "terminate", "taskcompletion", "retaineddata" + */ nodeRebootOption?: BatchNodeRebootOption; } +/** Options for deallocating a Compute Node. */ +export interface BatchNodeDeallocateContent { + /** + * When to deallocate the Compute Node and what to do with currently running Tasks. The default value is requeue. + * + * Possible values: "requeue", "terminate", "taskcompletion", "retaineddata" + */ + nodeDeallocateOption?: BatchNodeDeallocateOption; +} + +/** Parameters for reimaging an Azure Batch Compute Node. */ +export interface BatchNodeReimageContent { + /** + * When to reimage the Compute Node and what to do with currently running Tasks. The default value is requeue. + * + * Possible values: "requeue", "terminate", "taskcompletion", "retaineddata" + */ + nodeReimageOption?: BatchNodeReimageOption; +} + /** Parameters for disabling scheduling on an Azure Batch Compute Node. */ export interface BatchNodeDisableSchedulingContent { - /** What to do with currently running Tasks when disabling Task scheduling on the Compute Node. The default value is requeue. */ + /** + * What to do with currently running Tasks when disabling Task scheduling on the Compute Node. The default value is requeue. 
+ * + * Possible values: "requeue", "terminate", "taskcompletion" + */ nodeDisableSchedulingOption?: BatchNodeDisableSchedulingOption; } @@ -1518,6 +1758,8 @@ export type DiskEncryptionTarget = string; export type BatchNodePlacementPolicyType = string; /** Alias for DiffDiskPlacement */ export type DiffDiskPlacement = string; +/** Alias for SecurityEncryptionTypes */ +export type SecurityEncryptionTypes = string; /** Alias for SecurityTypes */ export type SecurityTypes = string; /** Alias for DynamicVNetAssignmentScope */ @@ -1530,6 +1772,8 @@ export type NetworkSecurityGroupRuleAccess = string; export type IpAddressProvisioningType = string; /** Alias for ContainerWorkingDirectory */ export type ContainerWorkingDirectory = string; +/** Alias for ContainerHostDataPath */ +export type ContainerHostDataPath = string; /** Alias for AutoUserScope */ export type AutoUserScope = string; /** Alias for ElevationLevel */ @@ -1572,5 +1816,9 @@ export type DependencyAction = string; export type BatchTaskState = string; /** Alias for BatchNodeRebootOption */ export type BatchNodeRebootOption = string; +/** Alias for BatchNodeDeallocateOption */ +export type BatchNodeDeallocateOption = string; +/** Alias for BatchNodeReimageOption */ +export type BatchNodeReimageOption = string; /** Alias for BatchNodeDisableSchedulingOption */ export type BatchNodeDisableSchedulingOption = string; diff --git a/sdk/batch/batch-rest/src/outputModels.ts b/sdk/batch/batch-rest/src/outputModels.ts index a8968ab6011c..44f19538a936 100644 --- a/sdk/batch/batch-rest/src/outputModels.ts +++ b/sdk/batch/batch-rest/src/outputModels.ts @@ -78,7 +78,7 @@ export interface VirtualMachineConfigurationOutput { nodeAgentSKUId: string; /** Windows operating system settings on the virtual machine. This property must not be specified if the imageReference property specifies a Linux OS Image. 
*/ windowsConfiguration?: WindowsConfigurationOutput; - /** The configuration for data disks attached to the Compute Nodes in the Pool. This property must be specified if the Compute Nodes in the Pool need to have empty data disks attached to them. This cannot be updated. Each Compute Node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. When the Compute Node is removed from the Pool, the disk and all data associated with it is also deleted. The disk is not formatted after being attached, it must be formatted before use - for more information see https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. */ + /** The configuration for data disks attached to the Compute Nodes in the Pool. This property must be specified if the Compute Nodes in the Pool need to have empty data disks attached to them. This cannot be updated. Each Compute Node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. When the Compute Node is removed from the Pool, the disk and all data associated with it is also deleted. The disk is not formatted after being attached, it must be formatted before use - for more information see https://docs.microsoft.com/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and https://docs.microsoft.com/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. */ dataDisks?: Array; /** * This only applies to Images that contain the Windows operating system, and @@ -126,6 +126,10 @@ export interface ImageReferenceOutput { virtualMachineImageId?: string; /** The specific version of the platform image or marketplace image used to create the node. 
This read-only field differs from 'version' only if the value specified for 'version' when the pool was created was 'latest'. */ readonly exactVersion?: string; + /** The shared gallery image unique identifier. This property is mutually exclusive with other properties and can be fetched from shared gallery image GET call. */ + sharedGalleryImageId?: string; + /** The community gallery image unique identifier. This property is mutually exclusive with other properties and can be fetched from community gallery image GET call. */ + communityGalleryImageId?: string; } /** Windows operating system settings to apply to the virtual machine. */ @@ -142,17 +146,29 @@ export interface WindowsConfigurationOutput { export interface DataDiskOutput { /** The logical unit number. The logicalUnitNumber is used to uniquely identify each data disk. If attaching multiple disks, each should have a distinct logicalUnitNumber. The value must be between 0 and 63, inclusive. */ lun: number; - /** The type of caching to be enabled for the data disks. The default value for caching is readwrite. For information about the caching options see: https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. */ + /** + * The type of caching to be enabled for the data disks. The default value for caching is readwrite. For information about the caching options see: https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. + * + * Possible values: "none", "readonly", "readwrite" + */ caching?: CachingTypeOutput; /** The initial disk size in gigabytes. */ diskSizeGB: number; - /** The storage Account type to be used for the data disk. If omitted, the default is "standard_lrs". */ + /** + * The storage Account type to be used for the data disk. If omitted, the default is "standard_lrs". 
+ * + * Possible values: "standard_lrs", "premium_lrs", "standardssd_lrs" + */ storageAccountType?: StorageAccountTypeOutput; } /** The configuration for container-enabled Pools. */ export interface ContainerConfigurationOutput { - /** The container technology to be used. */ + /** + * The container technology to be used. + * + * Possible values: "dockerCompatible", "criCompatible" + */ type: ContainerTypeOutput; /** The collection of container Image names. This is the full Image reference, as would be specified to "docker pull". An Image will be sourced from the default Docker registry unless the Image is fully qualified with an alternative registry. */ containerImageNames?: string[]; @@ -187,7 +203,7 @@ export interface BatchNodeIdentityReferenceOutput { * Azure Compute Gallery Image. */ export interface DiskEncryptionConfigurationOutput { - /** The list of disk targets Batch Service will encrypt on the compute node. If omitted, no disks on the compute nodes in the pool will be encrypted. On Linux pool, only "TemporaryDisk" is supported; on Windows pool, "OsDisk" and "TemporaryDisk" must be specified. */ + /** The list of disk targets Batch Service will encrypt on the compute node. The list of disk targets Batch Service will encrypt on the compute node. */ targets?: DiskEncryptionTargetOutput[]; } @@ -197,7 +213,11 @@ export interface DiskEncryptionConfigurationOutput { * with best effort balancing. */ export interface BatchNodePlacementConfigurationOutput { - /** Node placement Policy type on Batch Pools. Allocation policy used by Batch Service to provision the nodes. If not specified, Batch will use the regional policy. */ + /** + * Node placement Policy type on Batch Pools. Allocation policy used by Batch Service to provision the nodes. If not specified, Batch will use the regional policy. 
+ * + * Possible values: "regional", "zonal" + */ policy?: BatchNodePlacementPolicyTypeOutput; } @@ -227,7 +247,11 @@ export interface VMExtensionOutput { export interface OSDiskOutput { /** Specifies the ephemeral Disk Settings for the operating system disk used by the compute node (VM). */ ephemeralOSDiskSettings?: DiffDiskSettingsOutput; - /** Specifies the caching requirements. Possible values are: None, ReadOnly, ReadWrite. The default values are: None for Standard storage. ReadOnly for Premium storage. */ + /** + * Specifies the caching requirements. Possible values are: None, ReadOnly, ReadWrite. The default values are: None for Standard storage. ReadOnly for Premium storage. + * + * Possible values: "none", "readonly", "readwrite" + */ caching?: CachingTypeOutput; /** The initial disk size in GB when creating new OS disk. */ diskSizeGB?: number; @@ -242,21 +266,45 @@ * compute node (VM). */ export interface DiffDiskSettingsOutput { - /** Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This property can be used by user in the request to choose the location e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. */ + /** + * Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This property can be used by user in the request to choose the location e.g., cache disk space for Ephemeral OS disk provisioning.
For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://docs.microsoft.com/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://docs.microsoft.com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. + * + * Possible values: "cachedisk" + */ placement?: DiffDiskPlacementOutput; } /** The managed disk parameters. */ export interface ManagedDiskOutput { - /** The storage account type for managed disk. */ - storageAccountType: StorageAccountTypeOutput; + /** + * The storage account type for managed disk. + * + * Possible values: "standard_lrs", "premium_lrs", "standardssd_lrs" + */ + storageAccountType?: StorageAccountTypeOutput; + /** Specifies the security profile settings for the managed disk. */ + securityProfile?: VMDiskSecurityProfileOutput; +} + +/** Specifies the security profile settings for the managed disk. **Note**: It can only be set for Confidential VMs and required when using Confidential VMs. */ +export interface VMDiskSecurityProfileOutput { + /** + * Specifies the EncryptionType of the managed disk. It is set to VMGuestStateOnly for encryption of just the VMGuestState blob, and NonPersistedTPM for not persisting firmware state in the VMGuestState blob. **Note**: It can be set for only Confidential VMs and is required when using Confidential VMs. + * + * Possible values: "NonPersistedTPM", "VMGuestStateOnly" + */ + securityEncryptionType?: SecurityEncryptionTypesOutput; } /** Specifies the security profile settings for the virtual machine or virtual machine scale set. */ export interface SecurityProfileOutput { - /** This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself. 
*/ + /** This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself. For more information on encryption at host requirements, please refer to https://learn.microsoft.com/azure/virtual-machines/disk-encryption#supported-vm-sizes. */ encryptionAtHost: boolean; - /** Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. */ + /** + * Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. + * + * Possible values: "trustedLaunch", "confidentialVM" + */ securityType: SecurityTypesOutput; /** Specifies the security settings like secure boot and vTPM used while creating the virtual machine. Specifies the security settings like secure boot and vTPM used while creating the virtual machine. */ uefiSettings: UefiSettingsOutput; @@ -281,13 +329,17 @@ export interface ServiceArtifactReferenceOutput { /** The network configuration for a Pool. */ export interface NetworkConfigurationOutput { - /** The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. 
The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. For Pools created with virtualMachineConfiguration only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication. For Pools created with a virtual machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Also enable outbound connections to Azure Storage on port 443. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ + /** The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. 
Only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication. Enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Also enable outbound connections to Azure Storage on port 443. For more details see: https://docs.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ subnetId?: string; - /** The scope of dynamic vnet assignment. */ + /** + * The scope of dynamic vnet assignment. + * + * Possible values: "none", "job" + */ dynamicVNetAssignmentScope?: DynamicVNetAssignmentScopeOutput; - /** The configuration for endpoints on Compute Nodes in the Batch Pool. Pool endpoint configuration is only supported on Pools with the virtualMachineConfiguration property. */ + /** The configuration for endpoints on Compute Nodes in the Batch Pool. */ endpointConfiguration?: BatchPoolEndpointConfigurationOutput; - /** The Public IPAddress configuration for Compute Nodes in the Batch Pool. Public IP configuration property is only supported on Pools with the virtualMachineConfiguration property. */ + /** The Public IPAddress configuration for Compute Nodes in the Batch Pool. */ publicIPAddressConfiguration?: PublicIpAddressConfigurationOutput; /** Whether this pool should enable accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, which may lead to improved networking performance. For more details, see: https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. */ enableAcceleratedNetworking?: boolean; @@ -306,7 +358,11 @@ export interface BatchPoolEndpointConfigurationOutput { export interface InboundNatPoolOutput { /** The name of the endpoint. The name must be unique within a Batch Pool, can contain letters, numbers, underscores, periods, and hyphens. 
Names must start with a letter or number, must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values are provided the request fails with HTTP status code 400. */ name: string; - /** The protocol of the endpoint. */ + /** + * The protocol of the endpoint. + * + * Possible values: "tcp", "udp" + */ protocol: InboundEndpointProtocolOutput; /** The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400. */ backendPort: number; @@ -322,7 +378,11 @@ export interface InboundNatPoolOutput { export interface NetworkSecurityGroupRuleOutput { /** The priority for this rule. Priorities within a Pool must be unique and are evaluated in order of priority. The lower the number the higher the priority. For example, rules could be specified with order numbers of 150, 250, and 350. The rule with the order number of 150 takes precedence over the rule that has an order of 250. Allowed priorities are 150 to 4096. If any reserved or duplicate values are provided the request fails with HTTP status code 400. */ priority: number; - /** The action that should be taken for a specified IP address, subnet range or tag. */ + /** + * The action that should be taken for a specified IP address, subnet range or tag. + * + * Possible values: "allow", "deny" + */ access: NetworkSecurityGroupRuleAccessOutput; /** The source address prefix or tag to match for the rule. Valid values are a single IP address (i.e. 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, or * (for all addresses). If any other values are provided the request fails with HTTP status code 400. */ sourceAddressPrefix: string; @@ -332,7 +392,11 @@ export interface NetworkSecurityGroupRuleOutput { /** The public IP Address configuration of the networking configuration of a Pool. 
*/ export interface PublicIpAddressConfigurationOutput { - /** The provisioning type for Public IP Addresses for the Pool. The default value is BatchManaged. */ + /** + * The provisioning type for Public IP Addresses for the Pool. The default value is BatchManaged. + * + * Possible values: "batchmanaged", "usermanaged", "nopublicipaddresses" + */ provision?: IpAddressProvisioningTypeOutput; /** The list of public IPs which the Batch service will use when provisioning Compute Nodes. The number of IPs specified here limits the maximum size of the Pool - 100 dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public IP. For example, a pool needing 250 dedicated VMs would need at least 3 public IPs specified. Each element of this collection is of the form: /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. */ ipAddressIds?: string[]; @@ -354,7 +418,7 @@ export interface PublicIpAddressConfigurationOutput { * block Batch from being able to re-run the StartTask. */ export interface BatchStartTaskOutput { - /** The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + /** The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. 
If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */ commandLine: string; /** The settings for the container under which the StartTask runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ containerSettings?: BatchTaskContainerSettingsOutput; @@ -378,8 +442,26 @@ export interface BatchTaskContainerSettingsOutput { imageName: string; /** The private registry which contains the container Image. This setting can be omitted if was already provided at Pool creation. */ registry?: ContainerRegistryReferenceOutput; - /** The location of the container Task working directory. The default is 'taskWorkingDirectory'. */ + /** + * The location of the container Task working directory. The default is 'taskWorkingDirectory'. + * + * Possible values: "taskWorkingDirectory", "containerImageDefault" + */ workingDirectory?: ContainerWorkingDirectoryOutput; + /** The paths you want to be mounted to the container task. If this array is null or not present, the container task will mount the entire temporary disk drive in Windows (or AZ_BATCH_NODE_ROOT_DIR in Linux). It won't mount any data paths into the container if this array is set as empty. 
*/ + containerHostBatchBindMounts?: Array; +} + +/** The entry of the path and mount mode you want to mount into the task container. */ +export interface ContainerHostBatchBindMountEntryOutput { + /** + * The path to be mounted into the container that the customer can select. + * + * Possible values: "regional", "zonal" + */ + source?: ContainerHostDataPathOutput; + /** Whether to mount this source path in read-only mode. Default value is false (read/write mode). For Linux, if you mount this path in read/write mode, this does not mean that all users in the container have read/write access for the path; it depends on the access in the host VM. If this path is mounted read-only, all users within the container will not be able to modify the path. */ + isReadOnly?: boolean; } /** A single file or multiple files to be downloaded to a Compute Node. */ @@ -418,9 +500,17 @@ export interface UserIdentityOutput { /** Specifies the options for the auto user that runs an Azure Batch Task. */ export interface AutoUserSpecificationOutput { - /** The scope for the auto user. The default value is pool. If the pool is running Windows, a value of Task should be specified if stricter isolation between tasks is required, such as if the task mutates the registry in a way which could impact other tasks. */ + /** + * The scope for the auto user. The default value is pool. If the pool is running Windows, a value of Task should be specified if stricter isolation between tasks is required, such as if the task mutates the registry in a way which could impact other tasks. + * + * Possible values: "task", "pool" + */ scope?: AutoUserScopeOutput; - /** The elevation level of the auto user. The default value is nonAdmin. */ + /** + * The elevation level of the auto user. The default value is nonAdmin. 
+ * + * Possible values: "nonadmin", "admin" + */ elevationLevel?: ElevationLevelOutput; } @@ -434,7 +524,11 @@ export interface BatchApplicationPackageReferenceOutput { /** Specifies how Tasks should be distributed across Compute Nodes. */ export interface BatchTaskSchedulingPolicyOutput { - /** How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. */ + /** + * How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. + * + * Possible values: "spread", "pack" + */ nodeFillType: BatchNodeFillTypeOutput; } @@ -447,7 +541,11 @@ export interface UserAccountOutput { name: string; /** The password for the user Account. */ password: string; - /** The elevation level of the user Account. The default value is nonAdmin. */ + /** + * The elevation level of the user Account. The default value is nonAdmin. + * + * Possible values: "nonadmin", "admin" + */ elevationLevel?: ElevationLevelOutput; /** The Linux-specific user configuration for the user Account. This property is ignored if specified on a Windows Pool. If not specified, the user is created with the default options. */ linuxUserConfiguration?: LinuxUserConfigurationOutput; @@ -467,7 +565,11 @@ export interface LinuxUserConfigurationOutput { /** Properties used to create a user Account on a Windows Compute Node. */ export interface WindowsUserConfigurationOutput { - /** The login mode for the user. The default value for VirtualMachineConfiguration Pools is 'batch'. */ + /** + * The login mode for the user. The default is 'batch'. + * + * Possible values: "batch", "interactive" + */ loginMode?: LoginModeOutput; } @@ -552,11 +654,15 @@ export interface AzureFileShareConfigurationOutput { /** Describes an upgrade policy - automatic, manual, or rolling. */ export interface UpgradePolicyOutput { - /** Specifies the mode of an upgrade to virtual machines in the scale set.

Possible values are:

**Manual** - You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade action.

**Automatic** - All virtual machines in the scale set are automatically updated at the same time.

**Rolling** - Scale set performs updates in batches with an optional pause time in between. */ + /** + * Specifies the mode of an upgrade to virtual machines in the scale set.

Possible values are:

**Manual** - You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade action.

**Automatic** - All virtual machines in the scale set are automatically updated at the same time.

**Rolling** - Scale set performs updates in batches with an optional pause time in between. + * + * Possible values: "automatic", "manual", "rolling" + */ mode: UpgradeModeOutput; /** Configuration parameters used for performing automatic OS Upgrade. The configuration parameters used for performing automatic OS upgrade. */ automaticOSUpgradePolicy?: AutomaticOsUpgradePolicyOutput; - /** The configuration parameters used while performing a rolling upgrade. This property is only supported on Pools with the virtualMachineConfiguration property. */ + /** The configuration parameters used while performing a rolling upgrade. */ rollingUpgradePolicy?: RollingUpgradePolicyOutput; } @@ -564,7 +670,7 @@ export interface UpgradePolicyOutput { export interface AutomaticOsUpgradePolicyOutput { /** Whether OS image rollback feature should be disabled. */ disableAutomaticRollback?: boolean; - /** Indicates whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available.

If this is set to true for Windows based pools, [WindowsConfiguration.enableAutomaticUpdates](https://learn.microsoft.com/en-us/rest/api/batchservice/pool/add?tabs=HTTP#windowsconfiguration) cannot be set to true. */ + /** Indicates whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available.

If this is set to true for Windows based pools, [WindowsConfiguration.enableAutomaticUpdates](https://learn.microsoft.com/rest/api/batchservice/pool/add?tabs=HTTP#windowsconfiguration) cannot be set to true. */ enableAutomaticOSUpgrade?: boolean; /** Indicates whether rolling upgrade policy should be used during Auto OS Upgrade. Auto OS Upgrade will fallback to the default policy if no policy is defined on the VMSS. */ useRollingUpgradePolicy?: boolean; @@ -612,15 +718,23 @@ export interface BatchPoolOutput { readonly lastModified?: string; /** The creation time of the Pool. */ readonly creationTime?: string; - /** The current state of the Pool. */ + /** + * The current state of the Pool. + * + * Possible values: "active", "deleting" + */ readonly state?: BatchPoolStateOutput; /** The time at which the Pool entered its current state. */ readonly stateTransitionTime?: string; - /** Whether the Pool is resizing. */ + /** + * Whether the Pool is resizing. + * + * Possible values: "steady", "resizing", "stopping" + */ readonly allocationState?: AllocationStateOutput; /** The time at which the Pool entered its current allocation state. */ readonly allocationStateTransitionTime?: string; - /** The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). */ + /** The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available VM sizes, see Sizes for Virtual Machines (Linux) (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for Virtual Machines (Windows) (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). 
Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). */ readonly vmSize?: string; /** The virtual machine configuration for the Pool. This property must be specified. */ readonly virtualMachineConfiguration?: VirtualMachineConfigurationOutput; @@ -662,15 +776,23 @@ export interface BatchPoolOutput { readonly userAccounts?: Array; /** A list of name-value pairs associated with the Pool as metadata. */ readonly metadata?: Array; - /** Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the CloudPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. */ + /** Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the BatchPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. */ readonly stats?: BatchPoolStatisticsOutput; /** A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. */ readonly mountConfiguration?: Array; /** The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. */ readonly identity?: BatchPoolIdentityOutput; - /** The desired node communication mode for the pool. If omitted, the default value is Default. 
*/ + /** + * The desired node communication mode for the pool. If omitted, the default value is Default. + * + * Possible values: "default", "classic", "simplified" + */ targetNodeCommunicationMode?: BatchNodeCommunicationModeOutput; - /** The current state of the pool communication mode. */ + /** + * The current state of the pool communication mode. + * + * Possible values: "default", "classic", "simplified" + */ readonly currentNodeCommunicationMode?: BatchNodeCommunicationModeOutput; /** The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. */ upgradePolicy?: UpgradePolicyOutput; @@ -770,7 +892,11 @@ export interface BatchPoolResourceStatisticsOutput { /** The identity of the Batch pool, if configured. */ export interface BatchPoolIdentityOutput { - /** The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. */ + /** + * The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. + * + * Possible values: "UserAssigned", "None" + */ type: BatchPoolIdentityTypeOutput; /** The list of user identities associated with the Batch account. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. 
*/ userAssignedIdentities?: Array; @@ -803,13 +929,21 @@ export interface BatchSupportedImageOutput { nodeAgentSKUId: string; /** The reference to the Azure Virtual Machine's Marketplace Image. */ imageReference: ImageReferenceOutput; - /** The type of operating system (e.g. Windows or Linux) of the Image. */ + /** + * The type of operating system (e.g. Windows or Linux) of the Image. + * + * Possible values: "linux", "windows" + */ osType: OSTypeOutput; /** The capabilities or features which the Image supports. Not every capability of the Image is listed. Capabilities in this list are considered of special interest and are generally related to integration with other features in the Azure Batch service. */ capabilities?: string[]; /** The time when the Azure Batch service will stop accepting create Pool requests for the Image. */ batchSupportEndOfLife?: string; - /** Whether the Azure Batch service actively verifies that the Image is compatible with the associated Compute Node agent SKU. */ + /** + * Whether the Azure Batch service actively verifies that the Image is compatible with the associated Compute Node agent SKU. + * + * Possible values: "verified", "unverified" + */ verificationType: ImageVerificationTypeOutput; } @@ -859,6 +993,10 @@ export interface BatchNodeCountsOutput { unusable: number; /** The number of Compute Nodes in the waitingForStartTask state. */ waitingForStartTask: number; + /** The number of Compute Nodes in the deallocated state. */ + deallocated: number; + /** The number of Compute Nodes in the deallocating state. */ + deallocating: number; /** The total number of Compute Nodes. */ total: number; /** The number of Compute Nodes in the upgradingOS state. */ @@ -881,11 +1019,19 @@ export interface BatchJobOutput { readonly lastModified?: string; /** The creation time of the Job. */ readonly creationTime?: string; - /** The current state of the Job. */ + /** + * The current state of the Job. 
+ * + * Possible values: "active", "disabling", "disabled", "enabling", "terminating", "completed", "deleting" + */ readonly state?: BatchJobStateOutput; /** The time at which the Job entered its current state. */ readonly stateTransitionTime?: string; - /** The previous state of the Job. This property is not set if the Job is in its initial Active state. */ + /** + * The previous state of the Job. This property is not set if the Job is in its initial Active state. + * + * Possible values: "active", "disabling", "disabled", "enabling", "terminating", "completed", "deleting" + */ readonly previousState?: BatchJobStateOutput; /** The time at which the Job entered its previous state. This property is not set if the Job is in its initial Active state. */ readonly previousStateTransitionTime?: string; @@ -907,9 +1053,17 @@ export interface BatchJobOutput { readonly commonEnvironmentSettings?: Array; /** The Pool settings associated with the Job. */ poolInfo: BatchPoolInfoOutput; - /** The action the Batch service should take when all Tasks in the Job are in the completed state. The default is noaction. */ + /** + * The action the Batch service should take when all Tasks in the Job are in the completed state. The default is noaction. + * + * Possible values: "noaction", "terminatejob" + */ onAllTasksComplete?: OnAllBatchTasksCompleteOutput; - /** The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. */ + /** + * The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if it has a failureInfo. 
A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. + * + * Possible values: "noaction", "performexitoptionsjobaction" + */ readonly onTaskFailure?: OnBatchTaskFailureOutput; /** The network configuration for the Job. */ readonly networkConfiguration?: BatchJobNetworkConfigurationOutput; @@ -917,7 +1071,7 @@ export interface BatchJobOutput { metadata?: Array; /** The execution information for the Job. */ readonly executionInfo?: BatchJobExecutionInfoOutput; - /** Resource usage statistics for the entire lifetime of the Job. This property is populated only if the CloudJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. */ + /** Resource usage statistics for the entire lifetime of the Job. This property is populated only if the BatchJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. */ readonly stats?: BatchJobStatisticsOutput; } @@ -959,7 +1113,7 @@ export interface BatchJobManagerTaskOutput { id: string; /** The display name of the Job Manager Task. It need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ displayName?: string; - /** The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. 
If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + /** The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */ commandLine: string; /** The settings for the container under which the Job Manager Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. 
*/ containerSettings?: BatchTaskContainerSettingsOutput; @@ -1020,7 +1174,7 @@ export interface OutputFileBlobContainerDestinationOutput { containerUrl: string; /** The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container. */ identityReference?: BatchNodeIdentityReferenceOutput; - /** A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types. */ + /** A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: https://docs.microsoft.com/rest/api/storageservices/put-blob#request-headers-all-blob-types. */ uploadHeaders?: Array; } @@ -1037,7 +1191,11 @@ export interface HttpHeaderOutput { * to perform the upload. */ export interface OutputFileUploadConfigOutput { - /** The conditions under which the Task output file or set of files should be uploaded. The default is taskcompletion. */ + /** + * The conditions under which the Task output file or set of files should be uploaded. The default is taskcompletion. + * + * Possible values: "tasksuccess", "taskfailure", "taskcompletion" + */ uploadCondition: OutputFileUploadConditionOutput; } @@ -1091,7 +1249,7 @@ export interface AuthenticationTokenSettingsOutput { export interface BatchJobPreparationTaskOutput { /** A string that uniquely identifies the Job Preparation Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. 
If you do not specify this property, the Batch service assigns a default value of 'jobpreparation'. No other Task in the Job can have the same ID as the Job Preparation Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). */ id?: string; - /** The command line of the Job Preparation Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + /** The command line of the Job Preparation Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */ commandLine: string; /** The settings for the container under which the Job Preparation Task runs. 
When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ containerSettings?: BatchTaskContainerSettingsOutput; @@ -1130,7 +1288,7 @@ export interface BatchJobPreparationTaskOutput { export interface BatchJobReleaseTaskOutput { /** A string that uniquely identifies the Job Release Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobrelease'. No other Task in the Job can have the same ID as the Job Release Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). */ id?: string; - /** The command line of the Job Release Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + /** The command line of the Job Release Task. 
The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */ commandLine: string; /** The settings for the container under which the Job Release Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ containerSettings?: BatchTaskContainerSettingsOutput; @@ -1161,7 +1319,11 @@ export interface BatchPoolInfoOutput { export interface BatchAutoPoolSpecificationOutput { /** A prefix to be added to the unique identifier when a Pool is automatically created. The Batch service assigns each auto Pool a unique identifier on creation. To distinguish between Pools created for different purposes, you can specify this element to add a prefix to the ID that is assigned. The prefix can be up to 20 characters long. */ autoPoolIdPrefix?: string; - /** The minimum lifetime of created auto Pools, and how multiple Jobs on a schedule are assigned to Pools. */ + /** + * The minimum lifetime of created auto Pools, and how multiple Jobs on a schedule are assigned to Pools. 
+ * + * Possible values: "jobschedule", "job" + */ poolLifetimeOption: BatchPoolLifetimeOptionOutput; /** Whether to keep an auto Pool alive after its lifetime expires. If false, the Batch service deletes the Pool once its lifetime (as determined by the poolLifetimeOption setting) expires; that is, when the Job or Job Schedule completes. If true, the Batch service does not delete the Pool automatically. It is up to the user to delete auto Pools created with this option. */ keepAlive?: boolean; @@ -1175,7 +1337,7 @@ export interface BatchPoolSpecificationOutput { displayName?: string; /** The size of the virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). */ vmSize: string; - /** The virtual machine configuration for the Pool. This property must be specified if the Pool needs to be created with Azure IaaS VMs. If it is not specified then the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + /** The virtual machine configuration for the Pool. This property must be specified. */ virtualMachineConfiguration?: VirtualMachineConfigurationOutput; /** The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. */ taskSlotsPerNode?: number; @@ -1209,7 +1371,11 @@ export interface BatchPoolSpecificationOutput { metadata?: Array; /** A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. */ mountConfiguration?: Array; - /** The desired node communication mode for the pool. If omitted, the default value is Default. 
*/ + /** + * The desired node communication mode for the pool. If omitted, the default value is Default. + * + * Possible values: "default", "classic", "simplified" + */ targetNodeCommunicationMode?: BatchNodeCommunicationModeOutput; /** The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. */ upgradePolicy?: UpgradePolicyOutput; @@ -1217,8 +1383,10 @@ export interface BatchPoolSpecificationOutput { /** The network configuration for the Job. */ export interface BatchJobNetworkConfigurationOutput { - /** The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. This will only work with a VirtualMachineConfiguration Pool. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. 
Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ + /** The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://docs.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ subnetId: string; + /** Whether to withdraw Compute Nodes from the virtual network to DNC when the job is terminated or deleted. 
If true, nodes will remain joined to the virtual network to DNC. If false, nodes will automatically withdraw when the job ends. Defaults to false. */ + skipWithdrawFromVNet: boolean; } /** Contains information about the execution of a Job in the Azure Batch service. */ @@ -1237,7 +1405,11 @@ export interface BatchJobExecutionInfoOutput { /** An error encountered by the Batch service when scheduling a Job. */ export interface BatchJobSchedulingErrorOutput { - /** The category of the Job scheduling error. */ + /** + * The category of the Job scheduling error. + * + * Possible values: "usererror", "servererror" + */ category: ErrorCategoryOutput; /** An identifier for the Job scheduling error. Codes are invariant and are intended to be consumed programmatically. */ code?: string; @@ -1321,7 +1493,11 @@ export interface BatchJobPreparationTaskExecutionInfoOutput { startTime: string; /** The time at which the Job Preparation Task completed. This property is set only if the Task is in the Completed state. */ endTime?: string; - /** The current state of the Job Preparation Task on the Compute Node. */ + /** + * The current state of the Job Preparation Task on the Compute Node. + * + * Possible values: "running", "completed" + */ state: BatchJobPreparationTaskStateOutput; /** The root directory of the Job Preparation Task on the Compute Node. You can use this path to retrieve files created by the Task, such as log files. */ taskRootDirectory?: string; @@ -1337,7 +1513,11 @@ export interface BatchJobPreparationTaskExecutionInfoOutput { retryCount: number; /** The most recent time at which a retry of the Job Preparation Task started running. This property is set only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not. 
*/ lastRetryTime?: string; - /** The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. */ + /** + * The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. + * + * Possible values: "success", "failure" + */ result?: BatchTaskExecutionResultOutput; } @@ -1353,7 +1533,11 @@ export interface BatchTaskContainerExecutionInfoOutput { /** Information about a Task failure. */ export interface BatchTaskFailureInfoOutput { - /** The category of the Task error. */ + /** + * The category of the Task error. + * + * Possible values: "usererror", "servererror" + */ category: ErrorCategoryOutput; /** An identifier for the Task error. Codes are invariant and are intended to be consumed programmatically. */ code?: string; @@ -1372,7 +1556,11 @@ export interface BatchJobReleaseTaskExecutionInfoOutput { startTime: string; /** The time at which the Job Release Task completed. This property is set only if the Task is in the Completed state. */ endTime?: string; - /** The current state of the Job Release Task on the Compute Node. */ + /** + * The current state of the Job Release Task on the Compute Node. + * + * Possible values: "running", "completed" + */ state: BatchJobReleaseTaskStateOutput; /** The root directory of the Job Release Task on the Compute Node. You can use this path to retrieve files created by the Task, such as log files. */ taskRootDirectory?: string; @@ -1384,7 +1572,11 @@ export interface BatchJobReleaseTaskExecutionInfoOutput { containerInfo?: BatchTaskContainerExecutionInfoOutput; /** Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. */ failureInfo?: BatchTaskFailureInfoOutput; - /** The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. 
*/ + /** + * The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. + * + * Possible values: "success", "failure" + */ result?: BatchTaskExecutionResultOutput; } @@ -1441,11 +1633,19 @@ export interface BatchJobScheduleOutput { readonly lastModified?: string; /** The creation time of the Job Schedule. */ readonly creationTime?: string; - /** The current state of the Job Schedule. */ + /** + * The current state of the Job Schedule. + * + * Possible values: "active", "completed", "disabled", "terminating", "deleting" + */ readonly state?: BatchJobScheduleStateOutput; /** The time at which the Job Schedule entered the current state. */ readonly stateTransitionTime?: string; - /** The previous state of the Job Schedule. This property is not present if the Job Schedule is in its initial active state. */ + /** + * The previous state of the Job Schedule. This property is not present if the Job Schedule is in its initial active state. + * + * Possible values: "active", "completed", "disabled", "terminating", "deleting" + */ readonly previousState?: BatchJobScheduleStateOutput; /** The time at which the Job Schedule entered its previous state. This property is not present if the Job Schedule is in its initial active state. */ readonly previousStateTransitionTime?: string; @@ -1488,9 +1688,17 @@ export interface BatchJobSpecificationOutput { displayName?: string; /** Whether Tasks in the Job can define dependencies on each other. The default is false. */ usesTaskDependencies?: boolean; - /** The action the Batch service should take when all Tasks in a Job created under this schedule are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. 
This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. */ + /** + * The action the Batch service should take when all Tasks in a Job created under this schedule are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. + * + * Possible values: "noaction", "terminatejob" + */ onAllTasksComplete?: OnAllBatchTasksCompleteOutput; - /** The action the Batch service should take when any Task fails in a Job created under this schedule. A Task is considered to have failed if it have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. */ + /** + * The action the Batch service should take when any Task fails in a Job created under this schedule. A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. + * + * Possible values: "noaction", "performexitoptionsjobaction" + */ onTaskFailure?: OnBatchTaskFailureOutput; /** The network configuration for the Job.
*/ networkConfiguration?: BatchJobNetworkConfigurationOutput; @@ -1598,9 +1806,17 @@ export interface ExitCodeMappingOutput { /** Specifies how the Batch service responds to a particular exit condition. */ export interface ExitOptionsOutput { - /** An action to take on the Job containing the Task, if the Task completes with the given exit condition and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The default is none for exit code 0 and terminate for all other exit conditions. If the Job's onTaskFailed property is noaction, then specifying this property returns an error and the add Task request fails with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + /** + * An action to take on the Job containing the Task, if the Task completes with the given exit condition and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The default is none for exit code 0 and terminate for all other exit conditions. If the Job's onTaskFailed property is noaction, then specifying this property returns an error and the add Task request fails with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + * + * Possible values: "none", "disable", "terminate" + */ jobAction?: BatchJobActionOutput; - /** An action that the Batch service performs on Tasks that depend on this Task. Possible values are 'satisfy' (allowing dependent tasks to progress) and 'block' (dependent tasks continue to wait). Batch does not yet support cancellation of dependent tasks. */ + /** + * An action that the Batch service performs on Tasks that depend on this Task. Possible values are 'satisfy' (allowing dependent tasks to progress) and 'block' (dependent tasks continue to wait). Batch does not yet support cancellation of dependent tasks. 
+ * + * Possible values: "satisfy", "block" + */ dependencyAction?: DependencyActionOutput; } @@ -1698,15 +1914,23 @@ export interface BatchTaskOutput { readonly creationTime?: string; /** How the Batch service should respond when the Task completes. */ readonly exitConditions?: ExitConditionsOutput; - /** The current state of the Task. */ + /** + * The current state of the Task. + * + * Possible values: "active", "preparing", "running", "completed" + */ readonly state?: BatchTaskStateOutput; /** The time at which the Task entered its current state. */ readonly stateTransitionTime?: string; - /** The previous state of the Task. This property is not set if the Task is in its initial Active state. */ + /** + * The previous state of the Task. This property is not set if the Task is in its initial Active state. + * + * Possible values: "active", "preparing", "running", "completed" + */ readonly previousState?: BatchTaskStateOutput; /** The time at which the Task entered its previous state. This property is not set if the Task is in its initial Active state. */ readonly previousStateTransitionTime?: string; - /** The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + /** The command line of the Task. 
For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */ readonly commandLine?: string; /** The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ readonly containerSettings?: BatchTaskContainerSettingsOutput; @@ -1760,7 +1984,11 @@ export interface BatchTaskExecutionInfoOutput { requeueCount: number; /** The most recent time at which the Task has been requeued by the Batch service as the result of a user request. This property is set only if the requeueCount is nonzero. */ lastRequeueTime?: string; - /** The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. 
*/ + /** + * The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. + * + * Possible values: "success", "failure" + */ result?: BatchTaskExecutionResultOutput; } @@ -1814,7 +2042,11 @@ export interface BatchTaskAddCollectionResultOutput { /** Result for a single Task added as part of an add Task collection operation. */ export interface BatchTaskAddResultOutput { - /** The status of the add Task request. */ + /** + * The status of the add Task request. + * + * Possible values: "success", "clienterror", "servererror" + */ status: BatchTaskAddStatusOutput; /** The ID of the Task for which this is the result. */ taskId: string; @@ -1852,15 +2084,27 @@ export interface BatchSubtaskOutput { containerInfo?: BatchTaskContainerExecutionInfoOutput; /** Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. */ failureInfo?: BatchTaskFailureInfoOutput; - /** The current state of the subtask. */ + /** + * The current state of the subtask. + * + * Possible values: "preparing", "running", "completed" + */ state?: BatchSubtaskStateOutput; /** The time at which the subtask entered its current state. */ stateTransitionTime?: string; - /** The previous state of the subtask. This property is not set if the subtask is in its initial running state. */ + /** + * The previous state of the subtask. This property is not set if the subtask is in its initial running state. + * + * Possible values: "preparing", "running", "completed" + */ previousState?: BatchSubtaskStateOutput; /** The time at which the subtask entered its previous state. This property is not set if the subtask is in its initial running state. */ previousStateTransitionTime?: string; - /** The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. 
*/ + /** + * The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. + * + * Possible values: "success", "failure" + */ result?: BatchTaskExecutionResultOutput; } @@ -1907,9 +2151,17 @@ export interface BatchNodeOutput { id?: string; /** The URL of the Compute Node. */ url?: string; - /** The current state of the Compute Node. The Spot/Low-priority Compute Node has been preempted. Tasks which were running on the Compute Node when it was preempted will be rescheduled when another Compute Node becomes available. */ + /** + * The current state of the Compute Node. The Spot/Low-priority Compute Node has been preempted. Tasks which were running on the Compute Node when it was preempted will be rescheduled when another Compute Node becomes available. + * + * Possible values: "idle", "rebooting", "reimaging", "running", "unusable", "creating", "starting", "waitingforstarttask", "starttaskfailed", "unknown", "leavingpool", "offline", "preempted", "upgradingos", "deallocated", "deallocating" + */ state?: BatchNodeStateOutput; - /** Whether the Compute Node is available for Task scheduling. */ + /** + * Whether the Compute Node is available for Task scheduling. + * + * Possible values: "enabled", "disabled" + */ schedulingState?: SchedulingStateOutput; /** The time at which the Compute Node entered its current state. */ stateTransitionTime?: string; @@ -1959,7 +2211,11 @@ export interface BatchTaskInfoOutput { taskId?: string; /** The ID of the subtask if the Task is a multi-instance Task. */ subtaskId?: number; - /** The current state of the Task. */ + /** + * The current state of the Task. + * + * Possible values: "active", "preparing", "running", "completed" + */ taskState: BatchTaskStateOutput; /** Information about the execution of the Task. 
*/ executionInfo?: BatchTaskExecutionInfoOutput; @@ -1967,7 +2223,11 @@ export interface BatchTaskInfoOutput { /** Information about a StartTask running on a Compute Node. */ export interface BatchStartTaskInfoOutput { - /** The state of the StartTask on the Compute Node. */ + /** + * The state of the StartTask on the Compute Node. + * + * Possible values: "running", "completed" + */ state: BatchStartTaskStateOutput; /** The time at which the StartTask started running. This value is reset every time the Task is restarted or retried (that is, this is the most recent time at which the StartTask started running). */ startTime: string; @@ -1983,7 +2243,11 @@ export interface BatchStartTaskInfoOutput { retryCount: number; /** The most recent time at which a retry of the Task started running. This element is present only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not. */ lastRetryTime?: string; - /** The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. */ + /** + * The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. + * + * Possible values: "success", "failure" + */ result?: BatchTaskExecutionResultOutput; } @@ -2007,7 +2271,11 @@ export interface BatchNodeEndpointConfigurationOutput { export interface InboundEndpointOutput { /** The name of the endpoint. */ name: string; - /** The protocol of the endpoint. */ + /** + * The protocol of the endpoint. + * + * Possible values: "tcp", "udp" + */ protocol: InboundEndpointProtocolOutput; /** The public IP address of the Compute Node. 
*/ publicIPAddress: string; @@ -2088,7 +2356,11 @@ export interface InstanceViewStatusOutput { code?: string; /** The localized label for the status. */ displayStatus?: string; - /** Level code. */ + /** + * Level code. + * + * Possible values: "Error", "Info", "Warning" + */ level?: StatusLevelTypesOutput; /** The detailed status message. */ message?: string; @@ -2116,6 +2388,8 @@ export type DiskEncryptionTargetOutput = string; export type BatchNodePlacementPolicyTypeOutput = string; /** Alias for DiffDiskPlacementOutput */ export type DiffDiskPlacementOutput = string; +/** Alias for SecurityEncryptionTypesOutput */ +export type SecurityEncryptionTypesOutput = string; /** Alias for SecurityTypesOutput */ export type SecurityTypesOutput = string; /** Alias for DynamicVNetAssignmentScopeOutput */ @@ -2128,6 +2402,8 @@ export type NetworkSecurityGroupRuleAccessOutput = string; export type IpAddressProvisioningTypeOutput = string; /** Alias for ContainerWorkingDirectoryOutput */ export type ContainerWorkingDirectoryOutput = string; +/** Alias for ContainerHostDataPathOutput */ +export type ContainerHostDataPathOutput = string; /** Alias for AutoUserScopeOutput */ export type AutoUserScopeOutput = string; /** Alias for ElevationLevelOutput */ diff --git a/sdk/batch/batch-rest/src/parameters.ts b/sdk/batch/batch-rest/src/parameters.ts index 0c8f58f0558e..16a2c925478e 100644 --- a/sdk/batch/batch-rest/src/parameters.ts +++ b/sdk/batch/batch-rest/src/parameters.ts @@ -25,6 +25,8 @@ import { BatchNodeUserCreateContent, BatchNodeUserUpdateContent, BatchNodeRebootContent, + BatchNodeDeallocateContent, + BatchNodeReimageContent, BatchNodeDisableSchedulingContent, UploadBatchServiceLogsContent, } from "./models.js"; @@ -138,7 +140,7 @@ export interface ListPoolUsageMetricsQueryParamProperties { endtime?: Date | string; /** * An OData $filter clause. 
For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. + * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. */ $filter?: string; } @@ -226,7 +228,7 @@ export interface ListPoolsQueryParamProperties { maxresults?: number; /** * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. + * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-pools. */ $filter?: string; /** An OData $select clause. */ @@ -896,7 +898,7 @@ export interface ListSupportedImagesQueryParamProperties { maxresults?: number; /** * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-support-images. */ $filter?: string; } @@ -939,7 +941,7 @@ export interface ListPoolNodeCountsQueryParamProperties { maxresults?: number; /** * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-support-images. */ $filter?: string; } @@ -999,6 +1001,8 @@ export interface DeleteJobHeaders { export interface DeleteJobQueryParamProperties { /** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */ timeOut?: number; + /** If true, the server will delete the Job even if the corresponding nodes have not fully processed the deletion. The default value is false. 
*/ + force?: boolean; } export interface DeleteJobQueryParam { @@ -1380,6 +1384,8 @@ export interface TerminateJobBodyParam { export interface TerminateJobQueryParamProperties { /** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */ timeOut?: number; + /** If true, the server will terminate the Job even if the corresponding nodes have not fully processed the termination. The default value is false. */ + force?: boolean; } export interface TerminateJobQueryParam { @@ -1472,7 +1478,7 @@ export interface ListJobsQueryParamProperties { maxresults?: number; /** * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. + * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-jobs. */ $filter?: string; /** An OData $select clause. */ @@ -1517,7 +1523,7 @@ export interface ListJobsFromScheduleQueryParamProperties { maxresults?: number; /** * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. + * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. */ $filter?: string; /** An OData $select clause. */ @@ -1564,7 +1570,7 @@ export interface ListJobPreparationAndReleaseTaskStatusQueryParamProperties { maxresults?: number; /** * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. + * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. */ $filter?: string; /** An OData $select clause. 
*/ @@ -1717,6 +1723,8 @@ export interface DeleteJobScheduleHeaders { export interface DeleteJobScheduleQueryParamProperties { /** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */ timeOut?: number; + /** If true, the server will delete the JobSchedule even if the corresponding nodes have not fully processed the deletion. The default value is false. */ + force?: boolean; } export interface DeleteJobScheduleQueryParam { @@ -2087,6 +2095,8 @@ export interface TerminateJobScheduleHeaders { export interface TerminateJobScheduleQueryParamProperties { /** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */ timeOut?: number; + /** If true, the server will terminate the JobSchedule even if the corresponding nodes have not fully processed the termination. The default value is false. */ + force?: boolean; } export interface TerminateJobScheduleQueryParam { @@ -2172,7 +2182,7 @@ export interface ListJobSchedulesQueryParamProperties { maxresults?: number; /** * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. + * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. */ $filter?: string; /** An OData $select clause. */ @@ -2264,7 +2274,7 @@ export interface ListTasksQueryParamProperties { maxresults?: number; /** * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. + * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-tasks. */ $filter?: string; /** An OData $select clause. 
*/ @@ -2821,7 +2831,7 @@ export interface ListTaskFilesQueryParamProperties { maxresults?: number; /** * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. + * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-task-files. */ $filter?: string; /** @@ -3044,6 +3054,127 @@ export type RebootNodeParameters = RebootNodeQueryParam & RebootNodeBodyParam & RequestParameters; +export interface StartNodeHeaders { + /** + * The caller-generated request identity, in the form of a GUID with no decoration + * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + */ + "client-request-id"?: string; + /** Whether the server should return the client-request-id in the response. */ + "return-client-request-id"?: boolean; + /** + * The time the request was issued. Client libraries typically set this to the + * current system clock time; set it explicitly if you are calling the REST API + * directly. + */ + "ocp-date"?: string; +} + +export interface StartNodeQueryParamProperties { + /** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */ + timeOut?: number; +} + +export interface StartNodeQueryParam { + queryParameters?: StartNodeQueryParamProperties; +} + +export interface StartNodeHeaderParam { + headers?: RawHttpHeadersInput & StartNodeHeaders; +} + +export type StartNodeParameters = StartNodeQueryParam & StartNodeHeaderParam & RequestParameters; + +export interface DeallocateNodeHeaders { + /** + * The caller-generated request identity, in the form of a GUID with no decoration + * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + */ + "client-request-id"?: string; + /** Whether the server should return the client-request-id in the response. 
*/ + "return-client-request-id"?: boolean; + /** + * The time the request was issued. Client libraries typically set this to the + * current system clock time; set it explicitly if you are calling the REST API + * directly. + */ + "ocp-date"?: string; +} + +export interface DeallocateNodeBodyParam { + /** The options to use for deallocating the Compute Node. */ + body?: BatchNodeDeallocateContent; +} + +export interface DeallocateNodeQueryParamProperties { + /** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */ + timeOut?: number; +} + +export interface DeallocateNodeQueryParam { + queryParameters?: DeallocateNodeQueryParamProperties; +} + +export interface DeallocateNodeHeaderParam { + headers?: RawHttpHeadersInput & DeallocateNodeHeaders; +} + +export interface DeallocateNodeMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; +} + +export type DeallocateNodeParameters = DeallocateNodeQueryParam & + DeallocateNodeHeaderParam & + DeallocateNodeMediaTypesParam & + DeallocateNodeBodyParam & + RequestParameters; + +export interface ReimageNodeHeaders { + /** + * The caller-generated request identity, in the form of a GUID with no decoration + * such as curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + */ + "client-request-id"?: string; + /** Whether the server should return the client-request-id in the response. */ + "return-client-request-id"?: boolean; + /** + * The time the request was issued. Client libraries typically set this to the + * current system clock time; set it explicitly if you are calling the REST API + * directly. + */ + "ocp-date"?: string; +} + +export interface ReimageNodeBodyParam { + /** The options to use for reimaging the Compute Node. 
*/ + body?: BatchNodeReimageContent; +} + +export interface ReimageNodeQueryParamProperties { + /** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */ + timeOut?: number; +} + +export interface ReimageNodeQueryParam { + queryParameters?: ReimageNodeQueryParamProperties; +} + +export interface ReimageNodeHeaderParam { + headers?: RawHttpHeadersInput & ReimageNodeHeaders; +} + +export interface ReimageNodeMediaTypesParam { + /** Type of content */ + contentType: "application/json; odata=minimalmetadata"; +} + +export type ReimageNodeParameters = ReimageNodeQueryParam & + ReimageNodeHeaderParam & + ReimageNodeMediaTypesParam & + ReimageNodeBodyParam & + RequestParameters; + export interface DisableNodeSchedulingHeaders { /** * The caller-generated request identity, in the form of a GUID with no decoration @@ -3226,7 +3357,7 @@ export interface ListNodesQueryParamProperties { maxresults?: number; /** * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. + * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. */ $filter?: string; /** An OData $select clause. */ @@ -3479,7 +3610,7 @@ export interface ListNodeFilesQueryParamProperties { maxresults?: number; /** * An OData $filter clause. For more information on constructing this filter, see - * https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. + * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. */ $filter?: string; /** Whether to list children of a directory. 
*/ diff --git a/sdk/batch/batch-rest/src/responses.ts b/sdk/batch/batch-rest/src/responses.ts index dbd8a974ed68..c634446b596e 100644 --- a/sdk/batch/batch-rest/src/responses.ts +++ b/sdk/batch/batch-rest/src/responses.ts @@ -1397,6 +1397,78 @@ export interface RebootNodeDefaultResponse extends HttpResponse { body: BatchErrorOutput; } +export interface StartNode202Headers { + /** The OData ID of the resource to which the request applied. */ + dataserviceid: string; + /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ + etag?: string; + /** The time at which the resource was last modified. */ + "last-modified"?: string; + /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ + "client-request-id"?: string; + /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ + "request-id"?: string; +} + +/** The request has been accepted for processing, but processing has not yet completed. */ +export interface StartNode202Response extends HttpResponse { + status: "202"; + headers: RawHttpHeaders & StartNode202Headers; +} + +export interface StartNodeDefaultResponse extends HttpResponse { + status: string; + body: BatchErrorOutput; +} + +export interface DeallocateNode202Headers { + /** The OData ID of the resource to which the request applied. 
*/ + dataserviceid: string; + /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ + etag?: string; + /** The time at which the resource was last modified. */ + "last-modified"?: string; + /** The client-request-id provided by the client during the request. This will be returned only if the return-client-request-id parameter was set to true. */ + "client-request-id"?: string; + /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ + "request-id"?: string; +} + +/** The request has been accepted for processing, but processing has not yet completed. */ +export interface DeallocateNode202Response extends HttpResponse { + status: "202"; + headers: RawHttpHeaders & DeallocateNode202Headers; +} + +export interface DeallocateNodeDefaultResponse extends HttpResponse { + status: string; + body: BatchErrorOutput; +} + +export interface ReimageNode202Headers { + /** The OData ID of the resource to which the request applied. */ + dataserviceid: string; + /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Modified-Since, If-Unmodified-Since, If-Match or If-None-Match headers. */ + etag?: string; + /** The time at which the resource was last modified. */ + "last-modified"?: string; + /** The client-request-id provided by the client during the request. 
This will be returned only if the return-client-request-id parameter was set to true. */ + "client-request-id"?: string; + /** A unique identifier for the request that was made to the Batch service. If a request is consistently failing and you have verified that the request is properly formulated, you may use this value to report the error to Microsoft. In your report, include the value of this request ID, the approximate time that the request was made, the Batch Account against which the request was made, and the region that Account resides in. */ + "request-id"?: string; +} + +/** The request has been accepted for processing, but processing has not yet completed. */ +export interface ReimageNode202Response extends HttpResponse { + status: "202"; + headers: RawHttpHeaders & ReimageNode202Headers; +} + +export interface ReimageNodeDefaultResponse extends HttpResponse { + status: string; + body: BatchErrorOutput; +} + export interface DisableNodeScheduling200Headers { /** The OData ID of the resource to which the request applied. 
*/ dataserviceid: string; diff --git a/sdk/batch/batch-rest/test/computeNodes.spec.ts b/sdk/batch/batch-rest/test/computeNodes.spec.ts index 9586582ab3bb..744e73c0c144 100644 --- a/sdk/batch/batch-rest/test/computeNodes.spec.ts +++ b/sdk/batch/batch-rest/test/computeNodes.spec.ts @@ -230,7 +230,7 @@ describe("Compute node operations", async () => { it("should reimage a compute node successfully", async () => { const reimageNodeResult = await batchClient .path( - "/pools/{poolId}/nodes/{nodeId}/reboot", + "/pools/{poolId}/nodes/{nodeId}/reimage", recorder.variable("BASIC_POOL", BASIC_POOL), computeNodes[1], ) @@ -264,4 +264,34 @@ describe("Compute node operations", async () => { assert.isAtLeast(uploadLogResult.body.numberOfFilesUploaded, 1); }); + + it("should deallocate and then start a compute node successfully", async () => { + const poolId = recorder.variable("BASIC_POOL", BASIC_POOL); + const nodeId = computeNodes[3]; + + const deallocateNodeResult = await batchClient + .path("/pools/{poolId}/nodes/{nodeId}/deallocate", poolId, nodeId) + .post({ contentType: "application/json; odata=minimalmetadata" }); + assert.equal(deallocateNodeResult.status, "202"); + + const checkIfDeallocated = async () => { + const nodes = await batchClient.path("/pools/{poolId}/nodes", poolId).get(); + if (isUnexpected(nodes)) { + assert.fail(`Received unexpected status code from listing nodes: ${nodes.status} + Response Body: ${nodes.body.message}`); + } + const node = nodes.body.value?.find((n) => n.id === nodeId); + if (node?.state === "deallocated") { + return node; + } + return null; + }; + + await waitForNotNull(checkIfDeallocated); + + const startNodeResult = await batchClient + .path("/pools/{poolId}/nodes/{nodeId}/start", poolId, nodeId) + .post({ contentType: "application/json; odata=minimalmetadata" }); + assert.equal(startNodeResult.status, "202"); + }); }); diff --git a/sdk/batch/batch-rest/test/jobs.spec.ts b/sdk/batch/batch-rest/test/jobs.spec.ts index 
48f24cd5d04c..88df260a8fd1 100644 --- a/sdk/batch/batch-rest/test/jobs.spec.ts +++ b/sdk/batch/batch-rest/test/jobs.spec.ts @@ -219,6 +219,9 @@ describe("Job Operations Test", () => { .path("/jobs/{jobId}/terminate", recorder.variable("JOB_NAME", JOB_NAME)) .post({ contentType: "application/json; odata=minimalmetadata", + queryParameters: { + force: true, + }, }); assert.equal(terminateJobResult.status, "202"); @@ -237,7 +240,9 @@ describe("Job Operations Test", () => { it("should delete a job successfully", async function () { const jobId = recorder.variable("JOB_NAME", JOB_NAME); - const deleteJobResult = await batchClient.path("/jobs/{jobId}", jobId).delete(); + const deleteJobResult = await batchClient.path("/jobs/{jobId}", jobId).delete({ + queryParameters: { force: true }, + }); assert.equal(deleteJobResult.status, "202"); }); }); diff --git a/sdk/batch/batch-rest/test/pools.spec.ts b/sdk/batch/batch-rest/test/pools.spec.ts index 91ab004c0aa8..475867801c5b 100644 --- a/sdk/batch/batch-rest/test/pools.spec.ts +++ b/sdk/batch/batch-rest/test/pools.spec.ts @@ -20,6 +20,7 @@ import { fakeTestPasswordPlaceholder1 } from "./utils/fakeTestSecrets.js"; import { wait } from "./utils/wait.js"; import { getResourceName, POLLING_INTERVAL, waitForNotNull } from "./utils/helpers.js"; import { describe, it, beforeEach, afterEach, assert, expect } from "vitest"; +import { waitForNodesToStart } from "./utils/pool.js"; const BASIC_POOL = getResourceName("Pool-Basic"); const VMSIZE_D1 = "Standard_D1_v2"; @@ -31,6 +32,7 @@ const ENDPOINT_POOL = getResourceName("Pool-Endpoint"); const TEST_POOL3 = getResourceName("Pool-3"); const SECURITY_PROFILE_POOL = getResourceName("Pool-SecurityProfile"); const AUTO_OS_UPGRADE_POOL = getResourceName("Pool-AutoOSUpgrade"); +const CVM_POOL = getResourceName("Pool-Confidential"); describe("Pool Operations Test", () => { let recorder: Recorder; @@ -348,6 +350,13 @@ describe("Pool Operations Test", () => { body: { id: 
recorder.variable("ENDPOINT_POOL", ENDPOINT_POOL), vmSize: VMSIZE_A1, + userAccounts: [ + { + name: nonAdminPoolUser, + password: isPlaybackMode() ? fakeTestPasswordPlaceholder1 : "user_1account_password2", + elevationLevel: "nonadmin", + }, + ], networkConfiguration: { endpointConfiguration: { inboundNATPools: [ @@ -365,15 +374,22 @@ describe("Pool Operations Test", () => { }, ], }, + { + name: "ssh", + protocol: "tcp", + backendPort: 22, + frontendPortRangeStart: 15000, + frontendPortRangeEnd: 15100, + }, ], }, }, virtualMachineConfiguration: { - nodeAgentSKUId: "batch.node.ubuntu 18.04", + nodeAgentSKUId: "batch.node.ubuntu 22.04", imageReference: { publisher: "Canonical", - offer: "UbuntuServer", - sku: "18.04-LTS", + offer: "0001-com-ubuntu-server-jammy", + sku: "22_04-lts", }, }, targetDedicatedNodes: 1, @@ -387,25 +403,8 @@ describe("Pool Operations Test", () => { it("should get the details of a pool with endpoint configuration successfully", async () => { const poolId = recorder.variable("ENDPOINT_POOL", ENDPOINT_POOL); - const listNodes = async () => { - const listResult = await batchClient.path("/pools/{poolId}/nodes", poolId).get(); - if (isUnexpected(listResult)) { - assert.fail(`Received unexpected status code from list compute nodes: ${listResult.status} - Response Body: ${listResult.body.message}`); - } - - const paginateResponse = paginate(batchClient, listResult); - const nodeList = []; - for await (const node of paginateResponse) { - nodeList.push(node); - } - if (nodeList.length > 0) { - return nodeList; - } - return null; - }; - const nodeList = await waitForNotNull(listNodes); + const nodeList = await waitForNodesToStart(poolId, batchClient); assert.lengthOf(nodeList, 1); assert.isDefined(nodeList[0].endpointConfiguration); @@ -451,6 +450,28 @@ describe("Pool Operations Test", () => { assert.equal(endpointPoolObj[0].lowPriority!.total, 0); }); + it("should get a remote login settings successfully", async () => { + const poolId = 
recorder.variable("ENDPOINT_POOL", ENDPOINT_POOL); + + const nodeList = await waitForNodesToStart(poolId, batchClient); + + const node = nodeList[0]; + if (!node.id) { + assert.fail("Node id is not defined in the node object"); + } + + const res = await batchClient + .path("/pools/{poolId}/nodes/{nodeId}/remoteloginsettings", poolId, node.id) + .get(); + + if (isUnexpected(res)) { + assert.fail(`Received unexpected status code from getting remote login settings: ${res.status} + Response Body: ${res.body.message}`); + } + expect(res.body.remoteLoginIPAddress).to.be.a("string"); + expect(res.body.remoteLoginPort).to.be.a("number"); + }); + it("should create a second pool successfully", async () => { const poolAddParams: CreatePoolParameters = { body: { @@ -676,4 +697,66 @@ describe("Pool Operations Test", () => { await batchClient.path("/pools/{poolId}", poolId).delete(); } }); + + it("should create a pool with confidential VM", async () => { + const poolId = recorder.variable("CVM_POOL", CVM_POOL); + const poolParams: CreatePoolParameters = { + body: { + id: poolId, + vmSize: VMSIZE_D2s, + virtualMachineConfiguration: { + imageReference: { + publisher: "Canonical", + offer: "0001-com-ubuntu-server-jammy", + sku: "22_04-lts", + }, + nodeAgentSKUId: "batch.node.ubuntu 22.04", + securityProfile: { + securityType: "confidentialVM", + encryptionAtHost: true, + uefiSettings: { + secureBootEnabled: true, + vTpmEnabled: true, + }, + }, + osDisk: { + managedDisk: { + securityProfile: { + securityEncryptionType: "VMGuestStateOnly", + }, + }, + }, + }, + targetDedicatedNodes: 0, + }, + contentType: "application/json; odata=minimalmetadata", + }; + + const result = await batchClient.path("/pools").post(poolParams); + + if (isUnexpected(result)) { + assert.fail(`Received unexpected status code from creating pool: ${result.status}`); + } + + try { + const res = await batchClient.path("/pools/{poolId}", poolId).get(); + + if (isUnexpected(res)) { + assert.fail(`Received unexpected 
status code from getting pool: ${res.status}`); + } + const securityProfile = res.body.virtualMachineConfiguration!.securityProfile!; + assert.equal(securityProfile.securityType?.toLocaleLowerCase(), "confidentialvm"); + assert.equal(securityProfile.encryptionAtHost, true); + assert.equal(securityProfile.uefiSettings!.secureBootEnabled, true); + assert.equal(securityProfile.uefiSettings!.vTpmEnabled, true); + + const osDisk = res.body.virtualMachineConfiguration!.osDisk!; + assert.equal( + osDisk.managedDisk!.securityProfile!.securityEncryptionType?.toLocaleLowerCase(), + "vmgueststateonly", + ); + } finally { + await batchClient.path("/pools/{poolId}", poolId).delete(); + } + }); }); diff --git a/sdk/batch/batch-rest/test/utils/envTokenCredential.ts b/sdk/batch/batch-rest/test/utils/envTokenCredential.ts new file mode 100644 index 000000000000..07c5660016cc --- /dev/null +++ b/sdk/batch/batch-rest/test/utils/envTokenCredential.ts @@ -0,0 +1,24 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { TokenCredential } from "@azure/identity"; + +/** + * A TokenCredential implementation that gets the token from the environment variable + * It's only used in browser live tests. + */ +export class EnvTokenCredential implements TokenCredential { + private token: string; + + constructor() { + const token = process.env["AZURE_BATCH_ACCESS_TOKEN"]; + if (!token) { + throw new Error("AZURE_BATCH_ACCESS_TOKEN must be set"); + } + this.token = token; + } + + async getToken(): Promise<{ token: string; expiresOnTimestamp: number }> { + return { token: this.token, expiresOnTimestamp: Date.now() + 60 * 60 * 24 }; + } +} diff --git a/sdk/batch/batch-rest/test/utils/pool.ts b/sdk/batch/batch-rest/test/utils/pool.ts new file mode 100644 index 000000000000..f8e741a2dce3 --- /dev/null +++ b/sdk/batch/batch-rest/test/utils/pool.ts @@ -0,0 +1,34 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +import { assert } from "vitest"; +import { BatchClient } from "../../src/clientDefinitions.js"; +import { isUnexpected } from "../../src/isUnexpected.js"; +import { paginate } from "../../src/paginateHelper.js"; +import { waitForNotNull } from "./helpers.js"; +import { BatchNodeOutput } from "../../src/outputModels.js"; + +export function waitForNodesToStart( + poolId: string, + batchClient: BatchClient, +): Promise<BatchNodeOutput[]> { + const listNodes = async (): Promise<BatchNodeOutput[] | null> => { + const listResult = await batchClient.path("/pools/{poolId}/nodes", poolId).get(); + if (isUnexpected(listResult)) { + assert.fail(`Received unexpected status code from list compute nodes: ${listResult.status} + Response Body: ${listResult.body.message}`); + } + + const paginateResponse = paginate(batchClient, listResult); + const nodeList = []; + for await (const node of paginateResponse) { + nodeList.push(node); + } + if (nodeList.length > 0) { + return nodeList; + } + return null; + }; + + return waitForNotNull(listNodes); +} diff --git a/sdk/batch/batch-rest/test/utils/recordedClient.ts b/sdk/batch/batch-rest/test/utils/recordedClient.ts index c516a5792352..25a8ee45e42c 100644 --- a/sdk/batch/batch-rest/test/utils/recordedClient.ts +++ b/sdk/batch/batch-rest/test/utils/recordedClient.ts @@ -15,14 +15,10 @@ import { fakeAzureBatchAccount, fakeAzureBatchEndpoint, } from "./fakeTestSecrets.js"; -import { - // AzureCliCredential, - // AzureCliCredential, - InteractiveBrowserCredential, -} from "@azure/identity"; import { isNode } from "@azure-tools/test-utils"; import { NoOpCredential } from "@azure-tools/test-credential"; import { AzureNamedKeyCredential } from "@azure/core-auth"; +import { EnvTokenCredential } from "./envTokenCredential.js"; const recorderEnvSetup: RecorderStartOptions = { envSetupForPlayback: { @@ -34,7 +30,7 @@ const recorderEnvSetup: RecorderStartOptions = { AZURE_BATCH_ACCESS_KEY: "api_key", }, // see
https://github.com/Azure/azure-sdk-tools/blob/main/tools/test-proxy/Azure.Sdk.Tools.TestProxy/Common/SanitizerDictionary.cs - removeCentralSanitizers: ["AZSDK3430", "AZSDK3479", "AZSDK3402", "AZSDK3493"], + removeCentralSanitizers: ["AZSDK3430", "AZSDK3479", "AZSDK3402", "AZSDK3493", "AZSDK4001"], sanitizerOptions: { bodyKeySanitizers: [ { @@ -75,14 +71,7 @@ export function createBatchClient(recorder?: Recorder, options: ClientOptions = ? new NoOpCredential() : isNode ? new AzureNamedKeyCredential(env.AZURE_BATCH_ACCOUNT!, env.AZURE_BATCH_ACCESS_KEY!) - : // : new AzureCliCredential(); - new InteractiveBrowserCredential({ - clientId: "04b07795-8ddb-461a-bbee-02f9e1bf7b46", - tokenCachePersistenceOptions: { - enabled: true, - name: "batch-test-cache", - }, - }); + : new EnvTokenCredential(); if (!isPlaybackMode() && !env.AZURE_BATCH_ENDPOINT) { throw Error("AZURE_BATCH_ENDPOINT env variable should be set in live mode"); diff --git a/sdk/batch/batch-rest/tsp-location.yaml b/sdk/batch/batch-rest/tsp-location.yaml index 4c2217171342..c4d5e0296846 100644 --- a/sdk/batch/batch-rest/tsp-location.yaml +++ b/sdk/batch/batch-rest/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/batch/Azure.Batch -commit: 934f8b595b6aba70096c6fd19089c3986b344c0c +commit: 191c76349cdbc840567a9f1b2cbae50fd57bc1b9 repo: Azure/azure-rest-api-specs additionalDirectories: