From fba3b12d25162aaccd6d98d97213533e8c8e9fa6 Mon Sep 17 00:00:00 2001 From: Azure SDK for Python bot Date: Mon, 3 Jun 2019 12:36:21 -0700 Subject: [PATCH] [AutoPR batch/data-plane] [Batch] Add new data plane version 2019-06-01 (#4876) * Generated from 1143452831aa606810a3cb4ff1c16ba263845004 Add new data plane version 2019-06-01 * Generated from 8ad857526957431e98e0b557fb7bcacdbfd5eb02 Add new data plane version 2019-06-01 * Generated from b0493535fd9ed2131d59963241b2009341fd217f Add new data plane version 2019-06-01 --- .../latest/batch/batch/batchapi/models.go | 2 +- profiles/latest/batch/batch/models.go | 38 +- .../preview/batch/batch/batchapi/models.go | 2 +- profiles/preview/batch/batch/models.go | 38 +- .../batch/2019-06-01.9.0/batch/account.go | 356 ++ .../batch/2019-06-01.9.0/batch/application.go | 302 ++ .../batch/batchapi/interfaces.go | 156 + .../batch/2019-06-01.9.0/batch/certificate.go | 645 +++ services/batch/2019-06-01.9.0/batch/client.go | 44 + .../batch/2019-06-01.9.0/batch/computenode.go | 1432 ++++++ services/batch/2019-06-01.9.0/batch/file.go | 1102 +++++ services/batch/2019-06-01.9.0/batch/job.go | 1924 ++++++++ .../batch/2019-06-01.9.0/batch/jobschedule.go | 1521 ++++++ services/batch/2019-06-01.9.0/batch/models.go | 4219 +++++++++++++++++ services/batch/2019-06-01.9.0/batch/pool.go | 2055 ++++++++ services/batch/2019-06-01.9.0/batch/task.go | 1258 +++++ .../batch/2019-06-01.9.0/batch/version.go | 30 + 17 files changed, 15104 insertions(+), 20 deletions(-) create mode 100644 services/batch/2019-06-01.9.0/batch/account.go create mode 100644 services/batch/2019-06-01.9.0/batch/application.go create mode 100644 services/batch/2019-06-01.9.0/batch/batchapi/interfaces.go create mode 100644 services/batch/2019-06-01.9.0/batch/certificate.go create mode 100644 services/batch/2019-06-01.9.0/batch/client.go create mode 100644 services/batch/2019-06-01.9.0/batch/computenode.go create mode 100644 services/batch/2019-06-01.9.0/batch/file.go create mode 100644 services/batch/2019-06-01.9.0/batch/job.go create mode 100644 services/batch/2019-06-01.9.0/batch/jobschedule.go create mode 100644 services/batch/2019-06-01.9.0/batch/models.go create mode 100644 services/batch/2019-06-01.9.0/batch/pool.go create mode 100644 services/batch/2019-06-01.9.0/batch/task.go create mode 100644 services/batch/2019-06-01.9.0/batch/version.go diff --git a/profiles/latest/batch/batch/batchapi/models.go b/profiles/latest/batch/batch/batchapi/models.go index ac07f0a334b4..a868766ef3b4 100644 --- a/profiles/latest/batch/batch/batchapi/models.go +++ b/profiles/latest/batch/batch/batchapi/models.go @@ -19,7 +19,7 @@ package batchapi -import original "github.com/Azure/azure-sdk-for-go/services/batch/2018-12-01.8.0/batch/batchapi" +import original "github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch/batchapi" type AccountClientAPI = original.AccountClientAPI type ApplicationClientAPI = original.ApplicationClientAPI diff --git a/profiles/latest/batch/batch/models.go b/profiles/latest/batch/batch/models.go index 71af2a66e80d..4da0853dbff3 100644 --- a/profiles/latest/batch/batch/models.go +++ b/profiles/latest/batch/batch/models.go @@ -22,7 +22,7 @@ package batch import ( "context" - original "github.com/Azure/azure-sdk-for-go/services/batch/2018-12-01.8.0/batch" + original "github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch" ) type AccessScope = original.AccessScope @@ -136,6
+136,13 @@ const ( WaitingForStartTask ComputeNodeState = original.WaitingForStartTask ) +type ContainerWorkingDirectory = original.ContainerWorkingDirectory + +const ( + ContainerImageDefault ContainerWorkingDirectory = original.ContainerImageDefault + TaskWorkingDirectory ContainerWorkingDirectory = original.TaskWorkingDirectory +) + type DependencyAction = original.DependencyAction const ( @@ -341,10 +348,17 @@ const ( TaskStateRunning TaskState = original.TaskStateRunning ) +type VerificationType = original.VerificationType + +const ( + Unverified VerificationType = original.Unverified + Verified VerificationType = original.Verified +) + type AccountClient = original.AccountClient -type AccountListNodeAgentSkusResult = original.AccountListNodeAgentSkusResult -type AccountListNodeAgentSkusResultIterator = original.AccountListNodeAgentSkusResultIterator -type AccountListNodeAgentSkusResultPage = original.AccountListNodeAgentSkusResultPage +type AccountListSupportedImagesResult = original.AccountListSupportedImagesResult +type AccountListSupportedImagesResultIterator = original.AccountListSupportedImagesResultIterator +type AccountListSupportedImagesResultPage = original.AccountListSupportedImagesResultPage type AffinityInformation = original.AffinityInformation type ApplicationClient = original.ApplicationClient type ApplicationListResult = original.ApplicationListResult @@ -410,6 +424,7 @@ type ExitConditions = original.ExitConditions type ExitOptions = original.ExitOptions type FileClient = original.FileClient type FileProperties = original.FileProperties +type ImageInformation = original.ImageInformation type ImageReference = original.ImageReference type InboundEndpoint = original.InboundEndpoint type InboundNATPool = original.InboundNATPool @@ -444,7 +459,6 @@ type NameValuePair = original.NameValuePair type NetworkConfiguration = original.NetworkConfiguration type NetworkSecurityGroupRule = original.NetworkSecurityGroupRule type NodeAgentInformation = original.NodeAgentInformation -type NodeAgentSku = original.NodeAgentSku type NodeCounts = original.NodeCounts type NodeDisableSchedulingParameter = original.NodeDisableSchedulingParameter type NodeFile = original.NodeFile @@ -519,11 +533,11 @@ func New(batchURL string) BaseClient { func NewAccountClient(batchURL string) AccountClient { return original.NewAccountClient(batchURL) } -func NewAccountListNodeAgentSkusResultIterator(page AccountListNodeAgentSkusResultPage) AccountListNodeAgentSkusResultIterator { - return original.NewAccountListNodeAgentSkusResultIterator(page) +func NewAccountListSupportedImagesResultIterator(page AccountListSupportedImagesResultPage) AccountListSupportedImagesResultIterator { + return original.NewAccountListSupportedImagesResultIterator(page) } -func NewAccountListNodeAgentSkusResultPage(getNextPage func(context.Context, AccountListNodeAgentSkusResult) (AccountListNodeAgentSkusResult, error)) AccountListNodeAgentSkusResultPage { - return original.NewAccountListNodeAgentSkusResultPage(getNextPage) +func NewAccountListSupportedImagesResultPage(getNextPage func(context.Context, AccountListSupportedImagesResult) (AccountListSupportedImagesResult, error)) AccountListSupportedImagesResultPage { + return original.NewAccountListSupportedImagesResultPage(getNextPage) } func NewApplicationClient(batchURL string) ApplicationClient { return original.NewApplicationClient(batchURL) @@ -657,6 +671,9 @@ func PossibleComputeNodeReimageOptionValues() []ComputeNodeReimageOption { func PossibleComputeNodeStateValues() 
[]ComputeNodeState { return original.PossibleComputeNodeStateValues() } +func PossibleContainerWorkingDirectoryValues() []ContainerWorkingDirectory { + return original.PossibleContainerWorkingDirectoryValues() +} func PossibleDependencyActionValues() []DependencyAction { return original.PossibleDependencyActionValues() } @@ -738,6 +755,9 @@ func PossibleTaskExecutionResultValues() []TaskExecutionResult { func PossibleTaskStateValues() []TaskState { return original.PossibleTaskStateValues() } +func PossibleVerificationTypeValues() []VerificationType { + return original.PossibleVerificationTypeValues() +} func UserAgent() string { return original.UserAgent() + " profiles/latest" } diff --git a/profiles/preview/batch/batch/batchapi/models.go b/profiles/preview/batch/batch/batchapi/models.go index ac07f0a334b4..a868766ef3b4 100644 --- a/profiles/preview/batch/batch/batchapi/models.go +++ b/profiles/preview/batch/batch/batchapi/models.go @@ -19,7 +19,7 @@ package batchapi -import original "github.com/Azure/azure-sdk-for-go/services/batch/2018-12-01.8.0/batch/batchapi" +import original "github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch/batchapi" type AccountClientAPI = original.AccountClientAPI type ApplicationClientAPI = original.ApplicationClientAPI diff --git a/profiles/preview/batch/batch/models.go b/profiles/preview/batch/batch/models.go index ec32e7f038a0..e69de20fca8c 100644 --- a/profiles/preview/batch/batch/models.go +++ b/profiles/preview/batch/batch/models.go @@ -22,7 +22,7 @@ package batch import ( "context" - original "github.com/Azure/azure-sdk-for-go/services/batch/2018-12-01.8.0/batch" + original "github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch" ) type AccessScope = original.AccessScope @@ -136,6 +136,13 @@ const ( WaitingForStartTask ComputeNodeState = original.WaitingForStartTask ) +type ContainerWorkingDirectory = original.ContainerWorkingDirectory + +const ( + ContainerImageDefault ContainerWorkingDirectory = original.ContainerImageDefault + TaskWorkingDirectory ContainerWorkingDirectory = original.TaskWorkingDirectory +) + type DependencyAction = original.DependencyAction const ( @@ -341,10 +348,17 @@ const ( TaskStateRunning TaskState = original.TaskStateRunning ) +type VerificationType = original.VerificationType + +const ( + Unverified VerificationType = original.Unverified + Verified VerificationType = original.Verified +) + type AccountClient = original.AccountClient -type AccountListNodeAgentSkusResult = original.AccountListNodeAgentSkusResult -type AccountListNodeAgentSkusResultIterator = original.AccountListNodeAgentSkusResultIterator -type AccountListNodeAgentSkusResultPage = original.AccountListNodeAgentSkusResultPage +type AccountListSupportedImagesResult = original.AccountListSupportedImagesResult +type AccountListSupportedImagesResultIterator = original.AccountListSupportedImagesResultIterator +type AccountListSupportedImagesResultPage = original.AccountListSupportedImagesResultPage type AffinityInformation = original.AffinityInformation type ApplicationClient = original.ApplicationClient type ApplicationListResult = original.ApplicationListResult @@ -410,6 +424,7 @@ type ExitConditions = original.ExitConditions type ExitOptions = original.ExitOptions type FileClient = original.FileClient type FileProperties = original.FileProperties +type ImageInformation = original.ImageInformation type ImageReference = original.ImageReference type InboundEndpoint = original.InboundEndpoint type InboundNATPool = original.InboundNATPool 
@@ -444,7 +459,6 @@ type NameValuePair = original.NameValuePair type NetworkConfiguration = original.NetworkConfiguration type NetworkSecurityGroupRule = original.NetworkSecurityGroupRule type NodeAgentInformation = original.NodeAgentInformation -type NodeAgentSku = original.NodeAgentSku type NodeCounts = original.NodeCounts type NodeDisableSchedulingParameter = original.NodeDisableSchedulingParameter type NodeFile = original.NodeFile @@ -519,11 +533,11 @@ func New(batchURL string) BaseClient { func NewAccountClient(batchURL string) AccountClient { return original.NewAccountClient(batchURL) } -func NewAccountListNodeAgentSkusResultIterator(page AccountListNodeAgentSkusResultPage) AccountListNodeAgentSkusResultIterator { - return original.NewAccountListNodeAgentSkusResultIterator(page) +func NewAccountListSupportedImagesResultIterator(page AccountListSupportedImagesResultPage) AccountListSupportedImagesResultIterator { + return original.NewAccountListSupportedImagesResultIterator(page) } -func NewAccountListNodeAgentSkusResultPage(getNextPage func(context.Context, AccountListNodeAgentSkusResult) (AccountListNodeAgentSkusResult, error)) AccountListNodeAgentSkusResultPage { - return original.NewAccountListNodeAgentSkusResultPage(getNextPage) +func NewAccountListSupportedImagesResultPage(getNextPage func(context.Context, AccountListSupportedImagesResult) (AccountListSupportedImagesResult, error)) AccountListSupportedImagesResultPage { + return original.NewAccountListSupportedImagesResultPage(getNextPage) } func NewApplicationClient(batchURL string) ApplicationClient { return original.NewApplicationClient(batchURL) @@ -657,6 +671,9 @@ func PossibleComputeNodeReimageOptionValues() []ComputeNodeReimageOption { func PossibleComputeNodeStateValues() []ComputeNodeState { return original.PossibleComputeNodeStateValues() } +func PossibleContainerWorkingDirectoryValues() []ContainerWorkingDirectory { + return original.PossibleContainerWorkingDirectoryValues() +} func PossibleDependencyActionValues() []DependencyAction { return original.PossibleDependencyActionValues() } @@ -738,6 +755,9 @@ func PossibleTaskExecutionResultValues() []TaskExecutionResult { func PossibleTaskStateValues() []TaskState { return original.PossibleTaskStateValues() } +func PossibleVerificationTypeValues() []VerificationType { + return original.PossibleVerificationTypeValues() +} func UserAgent() string { return original.UserAgent() + " profiles/preview" } diff --git a/services/batch/2019-06-01.9.0/batch/account.go b/services/batch/2019-06-01.9.0/batch/account.go new file mode 100644 index 000000000000..c9e76525ae6a --- /dev/null +++ b/services/batch/2019-06-01.9.0/batch/account.go @@ -0,0 +1,356 @@ +package batch + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
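Illustration, not part of the generated patch: the profile aliases above swap the removed NodeAgentSku listing for the new supported-images types (AccountListSupportedImagesResult, its iterator and page, and ImageInformation). A minimal, hedged sketch of calling the new endpoint through the versioned package follows; the account endpoint string is a placeholder and the authorizer wiring (commented out) is assumed to be supplied by the caller.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
)

func main() {
	// Placeholder endpoint; a real account URL and an autorest Authorizer set on
	// accountClient.Authorizer are assumed to come from the surrounding program.
	accountClient := batch.NewAccountClient("https://myaccount.westus2.batch.azure.com")

	// ListSupportedImagesComplete pages through /supportedimages automatically,
	// taking the place of the old node-agent-SKU listing.
	iter, err := accountClient.ListSupportedImagesComplete(context.Background(), "", nil, nil, nil, nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	for iter.NotDone() {
		fmt.Printf("%+v\n", iter.Value()) // each value is a batch.ImageInformation
		if err := iter.Next(); err != nil {
			log.Fatal(err)
		}
	}
}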
+ +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "github.com/satori/go.uuid" + "net/http" +) + +// AccountClient is the a client for issuing REST requests to the Azure Batch service. +type AccountClient struct { + BaseClient +} + +// NewAccountClient creates an instance of the AccountClient client. +func NewAccountClient(batchURL string) AccountClient { + return AccountClient{New(batchURL)} +} + +// ListPoolNodeCounts gets the number of nodes in each state, grouped by pool. +// Parameters: +// filter - an OData $filter clause. For more information on constructing this filter, see +// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch. +// maxResults - the maximum number of items to return in the response. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +func (client AccountClient) ListPoolNodeCounts(ctx context.Context, filter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result PoolNodeCountsListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountClient.ListPoolNodeCounts") + defer func() { + sc := -1 + if result.pnclr.Response.Response != nil { + sc = result.pnclr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxResults, + Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(10), Chain: nil}, + {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("batch.AccountClient", "ListPoolNodeCounts", err.Error()) + } + + result.fn = client.listPoolNodeCountsNextResults + req, err := client.ListPoolNodeCountsPreparer(ctx, filter, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.AccountClient", "ListPoolNodeCounts", nil, "Failure preparing request") + return + } + + resp, err := client.ListPoolNodeCountsSender(req) + if err != nil { + result.pnclr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.AccountClient", "ListPoolNodeCounts", resp, "Failure sending request") + return + } + + result.pnclr, err = client.ListPoolNodeCountsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.AccountClient", "ListPoolNodeCounts", resp, "Failure responding to request") + } + + return +} + +// ListPoolNodeCountsPreparer prepares the ListPoolNodeCounts request. 
+func (client AccountClient) ListPoolNodeCountsPreparer(ctx context.Context, filter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if maxResults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxResults) + } else { + queryParameters["maxresults"] = autorest.Encode("query", 10) + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPath("/nodecounts"), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListPoolNodeCountsSender sends the ListPoolNodeCounts request. The method will close the +// http.Response Body if it receives an error. +func (client AccountClient) ListPoolNodeCountsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ListPoolNodeCountsResponder handles the response to the ListPoolNodeCounts request. The method always +// closes the http.Response Body. +func (client AccountClient) ListPoolNodeCountsResponder(resp *http.Response) (result PoolNodeCountsListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listPoolNodeCountsNextResults retrieves the next set of results, if any. 
+func (client AccountClient) listPoolNodeCountsNextResults(ctx context.Context, lastResults PoolNodeCountsListResult) (result PoolNodeCountsListResult, err error) { + req, err := lastResults.poolNodeCountsListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.AccountClient", "listPoolNodeCountsNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListPoolNodeCountsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "batch.AccountClient", "listPoolNodeCountsNextResults", resp, "Failure sending next results request") + } + result, err = client.ListPoolNodeCountsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.AccountClient", "listPoolNodeCountsNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListPoolNodeCountsComplete enumerates all values, automatically crossing page boundaries as required. +func (client AccountClient) ListPoolNodeCountsComplete(ctx context.Context, filter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result PoolNodeCountsListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountClient.ListPoolNodeCounts") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListPoolNodeCounts(ctx, filter, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + return +} + +// ListSupportedImages sends the list supported images request. +// Parameters: +// filter - an OData $filter clause. For more information on constructing this filter, see +// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. +// maxResults - the maximum number of items to return in the response. A maximum of 1000 results will be +// returned. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. 
+func (client AccountClient) ListSupportedImages(ctx context.Context, filter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result AccountListSupportedImagesResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountClient.ListSupportedImages") + defer func() { + sc := -1 + if result.alsir.Response.Response != nil { + sc = result.alsir.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxResults, + Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil}, + {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("batch.AccountClient", "ListSupportedImages", err.Error()) + } + + result.fn = client.listSupportedImagesNextResults + req, err := client.ListSupportedImagesPreparer(ctx, filter, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.AccountClient", "ListSupportedImages", nil, "Failure preparing request") + return + } + + resp, err := client.ListSupportedImagesSender(req) + if err != nil { + result.alsir.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.AccountClient", "ListSupportedImages", resp, "Failure sending request") + return + } + + result.alsir, err = client.ListSupportedImagesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.AccountClient", "ListSupportedImages", resp, "Failure responding to request") + } + + return +} + +// ListSupportedImagesPreparer prepares the ListSupportedImages request. 
+func (client AccountClient) ListSupportedImagesPreparer(ctx context.Context, filter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if maxResults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxResults) + } else { + queryParameters["maxresults"] = autorest.Encode("query", 1000) + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPath("/supportedimages"), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSupportedImagesSender sends the ListSupportedImages request. The method will close the +// http.Response Body if it receives an error. +func (client AccountClient) ListSupportedImagesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ListSupportedImagesResponder handles the response to the ListSupportedImages request. The method always +// closes the http.Response Body. +func (client AccountClient) ListSupportedImagesResponder(resp *http.Response) (result AccountListSupportedImagesResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listSupportedImagesNextResults retrieves the next set of results, if any. 
+func (client AccountClient) listSupportedImagesNextResults(ctx context.Context, lastResults AccountListSupportedImagesResult) (result AccountListSupportedImagesResult, err error) { + req, err := lastResults.accountListSupportedImagesResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.AccountClient", "listSupportedImagesNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSupportedImagesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "batch.AccountClient", "listSupportedImagesNextResults", resp, "Failure sending next results request") + } + result, err = client.ListSupportedImagesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.AccountClient", "listSupportedImagesNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListSupportedImagesComplete enumerates all values, automatically crossing page boundaries as required. +func (client AccountClient) ListSupportedImagesComplete(ctx context.Context, filter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result AccountListSupportedImagesResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountClient.ListSupportedImages") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListSupportedImages(ctx, filter, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + return +} diff --git a/services/batch/2019-06-01.9.0/batch/application.go b/services/batch/2019-06-01.9.0/batch/application.go new file mode 100644 index 000000000000..11ddebf06caf --- /dev/null +++ b/services/batch/2019-06-01.9.0/batch/application.go @@ -0,0 +1,302 @@ +package batch + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "github.com/satori/go.uuid" + "net/http" +) + +// ApplicationClient is the a client for issuing REST requests to the Azure Batch service. +type ApplicationClient struct { + BaseClient +} + +// NewApplicationClient creates an instance of the ApplicationClient client. 
+func NewApplicationClient(batchURL string) ApplicationClient { + return ApplicationClient{New(batchURL)} +} + +// Get this operation returns only applications and versions that are available for use on compute nodes; that is, that +// can be used in an application package reference. For administrator information about applications and versions that +// are not yet available to compute nodes, use the Azure portal or the Azure Resource Manager API. +// Parameters: +// applicationID - the ID of the application. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +func (client ApplicationClient) Get(ctx context.Context, applicationID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result ApplicationSummary, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, applicationID, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client ApplicationClient) GetPreparer(ctx context.Context, applicationID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "applicationId": autorest.Encode("path", applicationID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/applications/{applicationId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ApplicationClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ApplicationClient) GetResponder(resp *http.Response) (result ApplicationSummary, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List this operation returns only applications and versions that are available for use on compute nodes; that is, +// that can be used in an application package reference. For administrator information about applications and versions +// that are not yet available to compute nodes, use the Azure portal or the Azure Resource Manager API. +// Parameters: +// maxResults - the maximum number of items to return in the response. A maximum of 1000 applications can be +// returned. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. 
+func (client ApplicationClient) List(ctx context.Context, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result ApplicationListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationClient.List") + defer func() { + sc := -1 + if result.alr.Response.Response != nil { + sc = result.alr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxResults, + Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil}, + {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("batch.ApplicationClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.alr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "List", resp, "Failure sending request") + return + } + + result.alr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ApplicationClient) ListPreparer(ctx context.Context, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if maxResults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxResults) + } else { + queryParameters["maxresults"] = autorest.Encode("query", 1000) + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPath("/applications"), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. 
+func (client ApplicationClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ApplicationClient) ListResponder(resp *http.Response) (result ApplicationListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client ApplicationClient) listNextResults(ctx context.Context, lastResults ApplicationListResult) (result ApplicationListResult, err error) { + req, err := lastResults.applicationListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.ApplicationClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "batch.ApplicationClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client ApplicationClient) ListComplete(ctx context.Context, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result ApplicationListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + return +} diff --git a/services/batch/2019-06-01.9.0/batch/batchapi/interfaces.go b/services/batch/2019-06-01.9.0/batch/batchapi/interfaces.go new file mode 100644 index 000000000000..7f0fb40210cc --- /dev/null +++ b/services/batch/2019-06-01.9.0/batch/batchapi/interfaces.go @@ -0,0 +1,156 @@ +package batchapi + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
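Illustration, not part of the generated patch: the ApplicationClient shown above exposes Get and List with the usual optional timeout/client-request-id/ocp-date parameters, which may all be nil. A short, hedged sketch of a helper that reads one application summary; the helper name and package are hypothetical, and the configured client is assumed to be built and authorized elsewhere.

package batchexamples

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
)

// printApplication is a hypothetical helper, not part of the SDK. It passes nil
// for every optional parameter of ApplicationClient.Get and prints the ID and
// display name from the returned ApplicationSummary.
func printApplication(ctx context.Context, appClient batch.ApplicationClient, applicationID string) error {
	summary, err := appClient.Get(ctx, applicationID, nil, nil, nil, nil)
	if err != nil {
		return err
	}
	if summary.ID != nil && summary.DisplayName != nil {
		fmt.Printf("application %s (%s)\n", *summary.ID, *summary.DisplayName)
	}
	return nil
}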
+ +import ( + "context" + "github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" + "github.com/satori/go.uuid" +) + +// ApplicationClientAPI contains the set of methods on the ApplicationClient type. +type ApplicationClientAPI interface { + Get(ctx context.Context, applicationID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.ApplicationSummary, err error) + List(ctx context.Context, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.ApplicationListResultPage, err error) +} + +var _ ApplicationClientAPI = (*batch.ApplicationClient)(nil) + +// PoolClientAPI contains the set of methods on the PoolClient type. +type PoolClientAPI interface { + Add(ctx context.Context, pool batch.PoolAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) + Delete(ctx context.Context, poolID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) + DisableAutoScale(ctx context.Context, poolID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) + EnableAutoScale(ctx context.Context, poolID string, poolEnableAutoScaleParameter batch.PoolEnableAutoScaleParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) + EvaluateAutoScale(ctx context.Context, poolID string, poolEvaluateAutoScaleParameter batch.PoolEvaluateAutoScaleParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.AutoScaleRun, err error) + Exists(ctx context.Context, poolID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) + Get(ctx context.Context, poolID string, selectParameter string, expand string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result batch.CloudPool, err error) + GetAllLifetimeStatistics(ctx context.Context, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.PoolStatistics, err error) + List(ctx context.Context, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.CloudPoolListResultPage, err error) + ListUsageMetrics(ctx context.Context, startTime *date.Time, endTime *date.Time, filter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.PoolListUsageMetricsResultPage, err error) + 
Patch(ctx context.Context, poolID string, poolPatchParameter batch.PoolPatchParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) + RemoveNodes(ctx context.Context, poolID string, nodeRemoveParameter batch.NodeRemoveParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) + Resize(ctx context.Context, poolID string, poolResizeParameter batch.PoolResizeParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) + StopResize(ctx context.Context, poolID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) + UpdateProperties(ctx context.Context, poolID string, poolUpdatePropertiesParameter batch.PoolUpdatePropertiesParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) +} + +var _ PoolClientAPI = (*batch.PoolClient)(nil) + +// AccountClientAPI contains the set of methods on the AccountClient type. +type AccountClientAPI interface { + ListPoolNodeCounts(ctx context.Context, filter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.PoolNodeCountsListResultPage, err error) + ListSupportedImages(ctx context.Context, filter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.AccountListSupportedImagesResultPage, err error) +} + +var _ AccountClientAPI = (*batch.AccountClient)(nil) + +// JobClientAPI contains the set of methods on the JobClient type. 
+type JobClientAPI interface { + Add(ctx context.Context, job batch.JobAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) + Delete(ctx context.Context, jobID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) + Disable(ctx context.Context, jobID string, jobDisableParameter batch.JobDisableParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) + Enable(ctx context.Context, jobID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) + Get(ctx context.Context, jobID string, selectParameter string, expand string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result batch.CloudJob, err error) + GetAllLifetimeStatistics(ctx context.Context, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.JobStatistics, err error) + GetTaskCounts(ctx context.Context, jobID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.TaskCounts, err error) + List(ctx context.Context, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.CloudJobListResultPage, err error) + ListFromJobSchedule(ctx context.Context, jobScheduleID string, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.CloudJobListResultPage, err error) + ListPreparationAndReleaseTaskStatus(ctx context.Context, jobID string, filter string, selectParameter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.CloudJobListPreparationAndReleaseTaskStatusResultPage, err error) + Patch(ctx context.Context, jobID string, jobPatchParameter batch.JobPatchParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) + Terminate(ctx context.Context, jobID string, jobTerminateParameter *batch.JobTerminateParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) + Update(ctx context.Context, jobID string, jobUpdateParameter batch.JobUpdateParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, 
ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) +} + +var _ JobClientAPI = (*batch.JobClient)(nil) + +// CertificateClientAPI contains the set of methods on the CertificateClient type. +type CertificateClientAPI interface { + Add(ctx context.Context, certificate batch.CertificateAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) + CancelDeletion(ctx context.Context, thumbprintAlgorithm string, thumbprint string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) + Delete(ctx context.Context, thumbprintAlgorithm string, thumbprint string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) + Get(ctx context.Context, thumbprintAlgorithm string, thumbprint string, selectParameter string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.Certificate, err error) + List(ctx context.Context, filter string, selectParameter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.CertificateListResultPage, err error) +} + +var _ CertificateClientAPI = (*batch.CertificateClient)(nil) + +// FileClientAPI contains the set of methods on the FileClient type. +type FileClientAPI interface { + DeleteFromComputeNode(ctx context.Context, poolID string, nodeID string, filePath string, recursive *bool, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) + DeleteFromTask(ctx context.Context, jobID string, taskID string, filePath string, recursive *bool, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) + GetFromComputeNode(ctx context.Context, poolID string, nodeID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ocpRange string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result batch.ReadCloser, err error) + GetFromTask(ctx context.Context, jobID string, taskID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ocpRange string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result batch.ReadCloser, err error) + GetPropertiesFromComputeNode(ctx context.Context, poolID string, nodeID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) + GetPropertiesFromTask(ctx context.Context, jobID string, taskID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) + ListFromComputeNode(ctx context.Context, poolID string, nodeID string, filter string, recursive *bool, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) 
(result batch.NodeFileListResultPage, err error) + ListFromTask(ctx context.Context, jobID string, taskID string, filter string, recursive *bool, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.NodeFileListResultPage, err error) +} + +var _ FileClientAPI = (*batch.FileClient)(nil) + +// JobScheduleClientAPI contains the set of methods on the JobScheduleClient type. +type JobScheduleClientAPI interface { + Add(ctx context.Context, cloudJobSchedule batch.JobScheduleAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) + Delete(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) + Disable(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) + Enable(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) + Exists(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) + Get(ctx context.Context, jobScheduleID string, selectParameter string, expand string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result batch.CloudJobSchedule, err error) + List(ctx context.Context, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.CloudJobScheduleListResultPage, err error) + Patch(ctx context.Context, jobScheduleID string, jobSchedulePatchParameter batch.JobSchedulePatchParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) + Terminate(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) + Update(ctx context.Context, jobScheduleID string, jobScheduleUpdateParameter batch.JobScheduleUpdateParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) +} + +var _ JobScheduleClientAPI = 
(*batch.JobScheduleClient)(nil) + +// TaskClientAPI contains the set of methods on the TaskClient type. +type TaskClientAPI interface { + Add(ctx context.Context, jobID string, task batch.TaskAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) + AddCollection(ctx context.Context, jobID string, taskCollection batch.TaskAddCollectionParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.TaskAddCollectionResult, err error) + Delete(ctx context.Context, jobID string, taskID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) + Get(ctx context.Context, jobID string, taskID string, selectParameter string, expand string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result batch.CloudTask, err error) + List(ctx context.Context, jobID string, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.CloudTaskListResultPage, err error) + ListSubtasks(ctx context.Context, jobID string, taskID string, selectParameter string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.CloudTaskListSubtasksResult, err error) + Reactivate(ctx context.Context, jobID string, taskID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) + Terminate(ctx context.Context, jobID string, taskID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) + Update(ctx context.Context, jobID string, taskID string, taskUpdateParameter batch.TaskUpdateParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) +} + +var _ TaskClientAPI = (*batch.TaskClient)(nil) + +// ComputeNodeClientAPI contains the set of methods on the ComputeNodeClient type. 
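The interfaces in the batchapi package mirror the exported method sets of the generated clients, and the `var _ ... = (*batch.XxxClient)(nil)` lines are compile-time assertions that each concrete client satisfies its interface, which makes the package a natural seam for test doubles. A minimal sketch under assumptions (placeholder account URL, authorizer setup omitted, pointer-typed Thumbprint field on the generated Certificate model) could look like the following; the ComputeNodeClientAPI definition continues right after the sketch.

package main

import (
    "context"
    "fmt"

    "github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
    "github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch/batchapi"
)

// describeCertificate depends only on the interface, so a unit test can pass
// any value implementing batchapi.CertificateClientAPI instead of a live client.
func describeCertificate(ctx context.Context, c batchapi.CertificateClientAPI, thumbprint string) (string, error) {
    cert, err := c.Get(ctx, "sha1", thumbprint, "", nil, nil, nil, nil)
    if err != nil {
        return "", err
    }
    if cert.Thumbprint != nil { // assumes the generated model uses *string fields
        return *cert.Thumbprint, nil
    }
    return "", nil
}

func main() {
    // The concrete client satisfies the interface, as the var _ assertions verify.
    client := batch.NewCertificateClient("https://myaccount.myregion.batch.azure.com") // placeholder URL
    // client.Authorizer = ...                                                          // auth omitted in this sketch
    fmt.Println(describeCertificate(context.Background(), client, "0123456789abcdef0123456789abcdef01234567"))
}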
+type ComputeNodeClientAPI interface { + AddUser(ctx context.Context, poolID string, nodeID string, userParameter batch.ComputeNodeUser, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) + DeleteUser(ctx context.Context, poolID string, nodeID string, userName string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) + DisableScheduling(ctx context.Context, poolID string, nodeID string, nodeDisableSchedulingParameter *batch.NodeDisableSchedulingParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) + EnableScheduling(ctx context.Context, poolID string, nodeID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) + Get(ctx context.Context, poolID string, nodeID string, selectParameter string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.ComputeNode, err error) + GetRemoteDesktop(ctx context.Context, poolID string, nodeID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.ReadCloser, err error) + GetRemoteLoginSettings(ctx context.Context, poolID string, nodeID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.ComputeNodeGetRemoteLoginSettingsResult, err error) + List(ctx context.Context, poolID string, filter string, selectParameter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.ComputeNodeListResultPage, err error) + Reboot(ctx context.Context, poolID string, nodeID string, nodeRebootParameter *batch.NodeRebootParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) + Reimage(ctx context.Context, poolID string, nodeID string, nodeReimageParameter *batch.NodeReimageParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) + UpdateUser(ctx context.Context, poolID string, nodeID string, userName string, nodeUpdateUserParameter batch.NodeUpdateUserParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) + UploadBatchServiceLogs(ctx context.Context, poolID string, nodeID string, uploadBatchServiceLogsConfiguration batch.UploadBatchServiceLogsConfiguration, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.UploadBatchServiceLogsResult, err error) +} + +var _ ComputeNodeClientAPI = (*batch.ComputeNodeClient)(nil) diff --git a/services/batch/2019-06-01.9.0/batch/certificate.go b/services/batch/2019-06-01.9.0/batch/certificate.go new file mode 100644 index 000000000000..9b584b41f1fd --- /dev/null +++ b/services/batch/2019-06-01.9.0/batch/certificate.go @@ -0,0 +1,645 @@ +package batch + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "github.com/satori/go.uuid" + "net/http" +) + +// CertificateClient is the a client for issuing REST requests to the Azure Batch service. +type CertificateClient struct { + BaseClient +} + +// NewCertificateClient creates an instance of the CertificateClient client. +func NewCertificateClient(batchURL string) CertificateClient { + return CertificateClient{New(batchURL)} +} + +// Add sends the add request. +// Parameters: +// certificate - the certificate to be added. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +func (client CertificateClient) Add(ctx context.Context, certificate CertificateAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CertificateClient.Add") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: certificate, + Constraints: []validation.Constraint{{Target: "certificate.Thumbprint", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "certificate.ThumbprintAlgorithm", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "certificate.Data", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("batch.CertificateClient", "Add", err.Error()) + } + + req, err := client.AddPreparer(ctx, certificate, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Add", nil, "Failure preparing request") + return + } + + resp, err := client.AddSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Add", resp, "Failure sending request") + return + } + + result, err = client.AddResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Add", resp, "Failure responding to request") + } + + return +} + +// AddPreparer prepares the Add request. 
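With the Add operation above in place, a hedged sketch of calling it from application code: the three fields checked by the validation (Thumbprint, ThumbprintAlgorithm, Data) are set, and all optional parameters are left nil so the generated defaults apply (30-second timeout, return-client-request-id false). The account URL, thumbprint and data are placeholders, and the pointer-typed model fields are an assumption about the generated CertificateAddParameter. The AddPreparer implementation continues after the sketch.

package main

import (
    "context"
    "log"

    "github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
)

func main() {
    client := batch.NewCertificateClient("https://myaccount.myregion.batch.azure.com") // placeholder URL
    // client.Authorizer = ...                                                          // auth omitted in this sketch

    thumbprint := "0123456789abcdef0123456789abcdef01234567" // placeholder
    algorithm := "sha1"                                      // the service only accepts sha1
    data := "base64-encoded-certificate-bytes"               // placeholder

    // Passing nil for timeout, clientRequestID, returnClientRequestID and ocpDate
    // lets the preparer fall back to its documented defaults.
    _, err := client.Add(context.Background(), batch.CertificateAddParameter{
        Thumbprint:          &thumbprint,
        ThumbprintAlgorithm: &algorithm,
        Data:                &data,
    }, nil, nil, nil, nil)
    if err != nil {
        log.Fatalf("adding certificate: %v", err)
    }
}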
+func (client CertificateClient) AddPreparer(ctx context.Context, certificate CertificateAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPath("/certificates"), + autorest.WithJSON(certificate), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// AddSender sends the Add request. The method will close the +// http.Response Body if it receives an error. +func (client CertificateClient) AddSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// AddResponder handles the response to the Add request. The method always +// closes the http.Response Body. +func (client CertificateClient) AddResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// CancelDeletion if you try to delete a certificate that is being used by a pool or compute node, the status of the +// certificate changes to deleteFailed. If you decide that you want to continue using the certificate, you can use this +// operation to set the status of the certificate back to active. If you intend to delete the certificate, you do not +// need to run this operation after the deletion failed. You must make sure that the certificate is not being used by +// any resources, and then you can try again to delete the certificate. +// Parameters: +// thumbprintAlgorithm - the algorithm used to derive the thumbprint parameter. This must be sha1. +// thumbprint - the thumbprint of the certificate being deleted. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. 
Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +func (client CertificateClient) CancelDeletion(ctx context.Context, thumbprintAlgorithm string, thumbprint string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CertificateClient.CancelDeletion") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CancelDeletionPreparer(ctx, thumbprintAlgorithm, thumbprint, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.CertificateClient", "CancelDeletion", nil, "Failure preparing request") + return + } + + resp, err := client.CancelDeletionSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.CertificateClient", "CancelDeletion", resp, "Failure sending request") + return + } + + result, err = client.CancelDeletionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.CertificateClient", "CancelDeletion", resp, "Failure responding to request") + } + + return +} + +// CancelDeletionPreparer prepares the CancelDeletion request. +func (client CertificateClient) CancelDeletionPreparer(ctx context.Context, thumbprintAlgorithm string, thumbprint string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "thumbprint": autorest.Encode("path", thumbprint), + "thumbprintAlgorithm": autorest.Encode("path", thumbprintAlgorithm), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})/canceldelete", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CancelDeletionSender sends the CancelDeletion request. The method will close the +// http.Response Body if it receives an error. 
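Every operation in this file takes the same trio of optional tracing parameters (clientRequestID, returnClientRequestID, ocpDate) that the preparers turn into the client-request-id, return-client-request-id and ocp-date headers. A brief sketch of supplying them explicitly, using the satori uuid and go-autorest date packages this file already imports; the account URL and thumbprint are placeholders, the GUID is the example value from the doc comments, and the TimeRFC1123 literal assumes the type simply embeds time.Time. The CancelDeletionSender implementation continues after the sketch.

package main

import (
    "context"
    "log"
    "time"

    "github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
    "github.com/Azure/go-autorest/autorest/date"
    uuid "github.com/satori/go.uuid"
)

func main() {
    client := batch.NewCertificateClient("https://myaccount.myregion.batch.azure.com") // placeholder URL
    // client.Authorizer = ...                                                          // auth omitted in this sketch

    requestID, err := uuid.FromString("9C4D50EE-2D56-4CD3-8152-34347DC9F2B0") // format shown in the doc comments
    if err != nil {
        log.Fatal(err)
    }
    returnID := true
    ocpDate := date.TimeRFC1123{Time: time.Now().UTC()} // assumes TimeRFC1123 wraps time.Time
    timeout := int32(60)

    _, err = client.CancelDeletion(context.Background(), "sha1",
        "0123456789abcdef0123456789abcdef01234567", // placeholder thumbprint
        &timeout, &requestID, &returnID, &ocpDate)
    if err != nil {
        log.Fatalf("cancel deletion: %v", err)
    }
}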
+func (client CertificateClient) CancelDeletionSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// CancelDeletionResponder handles the response to the CancelDeletion request. The method always +// closes the http.Response Body. +func (client CertificateClient) CancelDeletionResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete you cannot delete a certificate if a resource (pool or compute node) is using it. Before you can delete a +// certificate, you must therefore make sure that the certificate is not associated with any existing pools, the +// certificate is not installed on any compute nodes (even if you remove a certificate from a pool, it is not removed +// from existing compute nodes in that pool until they restart), and no running tasks depend on the certificate. If you +// try to delete a certificate that is in use, the deletion fails. The certificate status changes to deleteFailed. You +// can use Cancel Delete Certificate to set the status back to active if you decide that you want to continue using the +// certificate. +// Parameters: +// thumbprintAlgorithm - the algorithm used to derive the thumbprint parameter. This must be sha1. +// thumbprint - the thumbprint of the certificate to be deleted. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +func (client CertificateClient) Delete(ctx context.Context, thumbprintAlgorithm string, thumbprint string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CertificateClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, thumbprintAlgorithm, thumbprint, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
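The Delete doc comment above spells out the life cycle: deletion is only accepted for certificates that no pool, node or task still references, and a failed attempt parks the certificate in deleteFailed until CancelDeletion moves it back to active. A small hedged sketch of issuing the delete (placeholder URL and thumbprint, auth omitted); the DeletePreparer implementation continues after the sketch.

package main

import (
    "context"
    "log"

    "github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
)

func main() {
    client := batch.NewCertificateClient("https://myaccount.myregion.batch.azure.com") // placeholder URL
    // client.Authorizer = ...                                                          // auth omitted in this sketch

    // Delete succeeds with 200 or 202 (see DeleteResponder below). If a pool or
    // node still references the certificate it ends up in deleteFailed, and the
    // CancelDeletion operation above returns it to active.
    _, err := client.Delete(context.Background(), "sha1",
        "0123456789abcdef0123456789abcdef01234567", // placeholder thumbprint
        nil, nil, nil, nil)
    if err != nil {
        log.Fatalf("delete certificate: %v", err)
    }
}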
+func (client CertificateClient) DeletePreparer(ctx context.Context, thumbprintAlgorithm string, thumbprint string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "thumbprint": autorest.Encode("path", thumbprint), + "thumbprintAlgorithm": autorest.Encode("path", thumbprintAlgorithm), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client CertificateClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client CertificateClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets information about the specified certificate. +// Parameters: +// thumbprintAlgorithm - the algorithm used to derive the thumbprint parameter. This must be sha1. +// thumbprint - the thumbprint of the certificate to get. +// selectParameter - an OData $select clause. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. 
+func (client CertificateClient) Get(ctx context.Context, thumbprintAlgorithm string, thumbprint string, selectParameter string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result Certificate, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CertificateClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, thumbprintAlgorithm, thumbprint, selectParameter, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client CertificateClient) GetPreparer(ctx context.Context, thumbprintAlgorithm string, thumbprint string, selectParameter string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "thumbprint": autorest.Encode("path", thumbprint), + "thumbprintAlgorithm": autorest.Encode("path", thumbprintAlgorithm), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
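GetPreparer adds the selectParameter as an OData $select query parameter only when it is non-empty, so callers can ask the service to project a subset of certificate properties. A hedged sketch follows (placeholder URL and thumbprint; the property names in the $select string are an assumption, check the Batch REST documentation for the exact list); the GetSender implementation continues after it.

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
)

func main() {
    client := batch.NewCertificateClient("https://myaccount.myregion.batch.azure.com") // placeholder URL
    // client.Authorizer = ...                                                          // auth omitted in this sketch

    // Request only a couple of properties; the preparer turns this into $select.
    cert, err := client.Get(context.Background(), "sha1",
        "0123456789abcdef0123456789abcdef01234567", // placeholder thumbprint
        "thumbprint,state", nil, nil, nil, nil)
    if err != nil {
        log.Fatalf("get certificate: %v", err)
    }
    fmt.Printf("%+v\n", cert)
}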
+func (client CertificateClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client CertificateClient) GetResponder(resp *http.Response) (result Certificate, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List sends the list request. +// Parameters: +// filter - an OData $filter clause. For more information on constructing this filter, see +// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-certificates. +// selectParameter - an OData $select clause. +// maxResults - the maximum number of items to return in the response. A maximum of 1000 certificates can be +// returned. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +func (client CertificateClient) List(ctx context.Context, filter string, selectParameter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CertificateListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CertificateClient.List") + defer func() { + sc := -1 + if result.clr.Response.Response != nil { + sc = result.clr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxResults, + Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil}, + {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("batch.CertificateClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, filter, selectParameter, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.CertificateClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.clr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.CertificateClient", "List", resp, "Failure sending request") + return + } + + result.clr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.CertificateClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client CertificateClient) ListPreparer(ctx context.Context, filter string, selectParameter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + if maxResults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxResults) + } else { + queryParameters["maxresults"] = autorest.Encode("query", 1000) + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPath("/certificates"), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client CertificateClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client CertificateClient) ListResponder(resp *http.Response) (result CertificateListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
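List returns the first page of results and validates maxResults against the 1..1000 range seen above; the listNextResults helper below, and the ListComplete iterator after it, drive the follow-up pages. A hedged sketch of walking pages by hand (placeholder URL, auth omitted; the NotDone/Values/NextWithContext surface of the page type is the usual shape of these generated pagers and is assumed rather than shown in this file).

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
)

func main() {
    ctx := context.Background()
    client := batch.NewCertificateClient("https://myaccount.myregion.batch.azure.com") // placeholder URL
    // client.Authorizer = ...                                                          // auth omitted in this sketch

    maxResults := int32(100) // must be between 1 and 1000, per the validation above
    page, err := client.List(ctx, "", "", &maxResults, nil, nil, nil, nil)
    if err != nil {
        log.Fatalf("listing certificates: %v", err)
    }
    for page.NotDone() {
        for _, cert := range page.Values() {
            fmt.Printf("%+v\n", cert)
        }
        if err := page.NextWithContext(ctx); err != nil {
            log.Fatalf("advancing page: %v", err)
        }
    }
}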
+func (client CertificateClient) listNextResults(ctx context.Context, lastResults CertificateListResult) (result CertificateListResult, err error) { + req, err := lastResults.certificateListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.CertificateClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "batch.CertificateClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.CertificateClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client CertificateClient) ListComplete(ctx context.Context, filter string, selectParameter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CertificateListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CertificateClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, filter, selectParameter, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + return +} diff --git a/services/batch/2019-06-01.9.0/batch/client.go b/services/batch/2019-06-01.9.0/batch/client.go new file mode 100644 index 000000000000..866d38884968 --- /dev/null +++ b/services/batch/2019-06-01.9.0/batch/client.go @@ -0,0 +1,44 @@ +// Package batch implements the Azure ARM Batch service API version 2019-06-01.9.0. +// +// A client for issuing REST requests to the Azure Batch service. +package batch + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +// BaseClient is the base client for Batch. +type BaseClient struct { + autorest.Client + BatchURL string +} + +// New creates an instance of the BaseClient client. +func New(batchURL string) BaseClient { + return NewWithoutDefaults(batchURL) +} + +// NewWithoutDefaults creates an instance of the BaseClient client. 
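New builds the BaseClient that every operation-group client in this package embeds, and each XxxSender retries with DoRetryForStatusCodes using the RetryAttempts and RetryDuration fields of the embedded autorest.Client. A short hedged sketch of tuning those knobs on a freshly constructed client (placeholder URL, auth omitted); the NewWithoutDefaults implementation continues after it.

package main

import (
    "fmt"
    "time"

    "github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
)

func main() {
    client := batch.NewCertificateClient("https://myaccount.myregion.batch.azure.com") // placeholder URL
    // client.Authorizer = ...                                                          // auth omitted in this sketch

    // The retry policy applied by the generated senders is read from the embedded
    // autorest.Client, so it can be adjusted per client after construction.
    client.RetryAttempts = 5
    client.RetryDuration = 2 * time.Second
    fmt.Println(client.RetryAttempts, client.RetryDuration)
}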
+func NewWithoutDefaults(batchURL string) BaseClient { + return BaseClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BatchURL: batchURL, + } +} diff --git a/services/batch/2019-06-01.9.0/batch/computenode.go b/services/batch/2019-06-01.9.0/batch/computenode.go new file mode 100644 index 000000000000..88e0813c7bd5 --- /dev/null +++ b/services/batch/2019-06-01.9.0/batch/computenode.go @@ -0,0 +1,1432 @@ +package batch + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "github.com/satori/go.uuid" + "net/http" +) + +// ComputeNodeClient is the a client for issuing REST requests to the Azure Batch service. +type ComputeNodeClient struct { + BaseClient +} + +// NewComputeNodeClient creates an instance of the ComputeNodeClient client. +func NewComputeNodeClient(batchURL string) ComputeNodeClient { + return ComputeNodeClient{New(batchURL)} +} + +// AddUser you can add a user account to a node only when it is in the idle or running state. +// Parameters: +// poolID - the ID of the pool that contains the compute node. +// nodeID - the ID of the machine on which you want to create a user account. +// userParameter - the user account to be created. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. 
+func (client ComputeNodeClient) AddUser(ctx context.Context, poolID string, nodeID string, userParameter ComputeNodeUser, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeClient.AddUser") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: userParameter, + Constraints: []validation.Constraint{{Target: "userParameter.Name", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("batch.ComputeNodeClient", "AddUser", err.Error()) + } + + req, err := client.AddUserPreparer(ctx, poolID, nodeID, userParameter, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "AddUser", nil, "Failure preparing request") + return + } + + resp, err := client.AddUserSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "AddUser", resp, "Failure sending request") + return + } + + result, err = client.AddUserResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "AddUser", resp, "Failure responding to request") + } + + return +} + +// AddUserPreparer prepares the AddUser request. +func (client ComputeNodeClient) AddUserPreparer(ctx context.Context, poolID string, nodeID string, userParameter ComputeNodeUser, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "nodeId": autorest.Encode("path", nodeID), + "poolId": autorest.Encode("path", poolID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/users", pathParameters), + autorest.WithJSON(userParameter), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// AddUserSender sends the AddUser request. The method will close the +// http.Response Body if it receives an error. 
+func (client ComputeNodeClient) AddUserSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// AddUserResponder handles the response to the AddUser request. The method always +// closes the http.Response Body. +func (client ComputeNodeClient) AddUserResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteUser you can delete a user account to a node only when it is in the idle or running state. +// Parameters: +// poolID - the ID of the pool that contains the compute node. +// nodeID - the ID of the machine on which you want to delete a user account. +// userName - the name of the user account to delete. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +func (client ComputeNodeClient) DeleteUser(ctx context.Context, poolID string, nodeID string, userName string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeClient.DeleteUser") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeleteUserPreparer(ctx, poolID, nodeID, userName, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "DeleteUser", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteUserSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "DeleteUser", resp, "Failure sending request") + return + } + + result, err = client.DeleteUserResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "DeleteUser", resp, "Failure responding to request") + } + + return +} + +// DeleteUserPreparer prepares the DeleteUser request. 
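AddUser and DeleteUser bracket the usual remote-access workflow: create a temporary account on a node that is idle or running, use it, and remove it. A hedged sketch under assumptions (placeholder URL, pool and node IDs; Name is the only field the validation above requires, while Password and IsAdmin are assumed fields of the generated ComputeNodeUser model); the DeleteUserPreparer implementation continues after it.

package main

import (
    "context"
    "log"

    "github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
)

func main() {
    ctx := context.Background()
    client := batch.NewComputeNodeClient("https://myaccount.myregion.batch.azure.com") // placeholder URL
    // client.Authorizer = ...                                                          // auth omitted in this sketch

    poolID, nodeID := "mypool", "tvm-1234567890_1-20190601t000000z" // placeholders
    name := "tempadmin"                 // required by the validation above
    password := "placeholder-password"  // assumed model field
    isAdmin := true                     // assumed model field

    // Create a user account on the node (the node must be idle or running).
    if _, err := client.AddUser(ctx, poolID, nodeID, batch.ComputeNodeUser{
        Name:     &name,
        Password: &password,
        IsAdmin:  &isAdmin,
    }, nil, nil, nil, nil); err != nil {
        log.Fatalf("add user: %v", err)
    }

    // ...and remove it again once it is no longer needed.
    if _, err := client.DeleteUser(ctx, poolID, nodeID, name, nil, nil, nil, nil); err != nil {
        log.Fatalf("delete user: %v", err)
    }
}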
+func (client ComputeNodeClient) DeleteUserPreparer(ctx context.Context, poolID string, nodeID string, userName string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "nodeId": autorest.Encode("path", nodeID), + "poolId": autorest.Encode("path", poolID), + "userName": autorest.Encode("path", userName), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/users/{userName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteUserSender sends the DeleteUser request. The method will close the +// http.Response Body if it receives an error. +func (client ComputeNodeClient) DeleteUserSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// DeleteUserResponder handles the response to the DeleteUser request. The method always +// closes the http.Response Body. +func (client ComputeNodeClient) DeleteUserResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// DisableScheduling you can disable task scheduling on a node only if its current scheduling state is enabled. +// Parameters: +// poolID - the ID of the pool that contains the compute node. +// nodeID - the ID of the compute node on which you want to disable task scheduling. +// nodeDisableSchedulingParameter - the parameters for the request. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. 
+func (client ComputeNodeClient) DisableScheduling(ctx context.Context, poolID string, nodeID string, nodeDisableSchedulingParameter *NodeDisableSchedulingParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeClient.DisableScheduling") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DisableSchedulingPreparer(ctx, poolID, nodeID, nodeDisableSchedulingParameter, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "DisableScheduling", nil, "Failure preparing request") + return + } + + resp, err := client.DisableSchedulingSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "DisableScheduling", resp, "Failure sending request") + return + } + + result, err = client.DisableSchedulingResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "DisableScheduling", resp, "Failure responding to request") + } + + return +} + +// DisableSchedulingPreparer prepares the DisableScheduling request. +func (client ComputeNodeClient) DisableSchedulingPreparer(ctx context.Context, poolID string, nodeID string, nodeDisableSchedulingParameter *NodeDisableSchedulingParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "nodeId": autorest.Encode("path", nodeID), + "poolId": autorest.Encode("path", poolID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/disablescheduling", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if nodeDisableSchedulingParameter != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(nodeDisableSchedulingParameter)) + } + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DisableSchedulingSender sends the DisableScheduling request. The method will close the +// http.Response Body if it receives an error. 
+func (client ComputeNodeClient) DisableSchedulingSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// DisableSchedulingResponder handles the response to the DisableScheduling request. The method always +// closes the http.Response Body. +func (client ComputeNodeClient) DisableSchedulingResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// EnableScheduling you can enable task scheduling on a node only if its current scheduling state is disabled +// Parameters: +// poolID - the ID of the pool that contains the compute node. +// nodeID - the ID of the compute node on which you want to enable task scheduling. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +func (client ComputeNodeClient) EnableScheduling(ctx context.Context, poolID string, nodeID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeClient.EnableScheduling") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.EnableSchedulingPreparer(ctx, poolID, nodeID, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "EnableScheduling", nil, "Failure preparing request") + return + } + + resp, err := client.EnableSchedulingSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "EnableScheduling", resp, "Failure sending request") + return + } + + result, err = client.EnableSchedulingResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "EnableScheduling", resp, "Failure responding to request") + } + + return +} + +// EnableSchedulingPreparer prepares the EnableScheduling request. 
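DisableScheduling and EnableScheduling together give a simple drain-and-restore pattern for node maintenance: scheduling can only be disabled while it is enabled, and only re-enabled while it is disabled. A hedged sketch with placeholder IDs; the optional NodeDisableSchedulingParameter body is passed as nil, which the preparer simply omits, leaving the handling of running tasks to the service default. The EnableSchedulingPreparer implementation continues after the sketch.

package main

import (
    "context"
    "log"

    "github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
)

func main() {
    ctx := context.Background()
    client := batch.NewComputeNodeClient("https://myaccount.myregion.batch.azure.com") // placeholder URL
    // client.Authorizer = ...                                                          // auth omitted in this sketch

    poolID, nodeID := "mypool", "tvm-1234567890_1-20190601t000000z" // placeholders

    // Stop new tasks from being scheduled on the node.
    if _, err := client.DisableScheduling(ctx, poolID, nodeID, nil, nil, nil, nil, nil); err != nil {
        log.Fatalf("disable scheduling: %v", err)
    }

    // ...perform maintenance on the node here...

    // Re-enable scheduling once the node is ready to take tasks again.
    if _, err := client.EnableScheduling(ctx, poolID, nodeID, nil, nil, nil, nil); err != nil {
        log.Fatalf("enable scheduling: %v", err)
    }
}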
+func (client ComputeNodeClient) EnableSchedulingPreparer(ctx context.Context, poolID string, nodeID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "nodeId": autorest.Encode("path", nodeID), + "poolId": autorest.Encode("path", poolID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/enablescheduling", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// EnableSchedulingSender sends the EnableScheduling request. The method will close the +// http.Response Body if it receives an error. +func (client ComputeNodeClient) EnableSchedulingSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// EnableSchedulingResponder handles the response to the EnableScheduling request. The method always +// closes the http.Response Body. +func (client ComputeNodeClient) EnableSchedulingResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get sends the get request. +// Parameters: +// poolID - the ID of the pool that contains the compute node. +// nodeID - the ID of the compute node that you want to get information about. +// selectParameter - an OData $select clause. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. 
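As an illustrative sketch (editorial, not part of the generated patch): assuming the generated NewComputeNodeClient constructor (the same pattern as NewFileClient later in this patch), a placeholder account URL, and an Authorizer configured on the embedded autorest.Client, draining a node and later re-enabling scheduling looks roughly like this. All optional arguments are passed as nil, so the documented defaults (30-second server timeout, no client-request-id, no ocp-date header) apply.

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
)

func main() {
	// Placeholder account URL, pool ID and node ID.
	client := batch.NewComputeNodeClient("https://myaccount.westus2.batch.azure.com")
	// client.Authorizer = ...   // credential setup is omitted in this sketch
	ctx := context.Background()

	// Stop new tasks from being scheduled on the node. A nil NodeDisableSchedulingParameter
	// leaves the handling of currently running tasks to the service default.
	if _, err := client.DisableScheduling(ctx, "mypool", "tvm-123", nil, nil, nil, nil, nil); err != nil {
		log.Fatal(err)
	}

	// Re-enable scheduling later; this is valid only while scheduling is disabled.
	if _, err := client.EnableScheduling(ctx, "mypool", "tvm-123", nil, nil, nil, nil); err != nil {
		log.Fatal(err)
	}
}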
+func (client ComputeNodeClient) Get(ctx context.Context, poolID string, nodeID string, selectParameter string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result ComputeNode, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, poolID, nodeID, selectParameter, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ComputeNodeClient) GetPreparer(ctx context.Context, poolID string, nodeID string, selectParameter string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "nodeId": autorest.Encode("path", nodeID), + "poolId": autorest.Encode("path", poolID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ComputeNodeClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client ComputeNodeClient) GetResponder(resp *http.Response) (result ComputeNode, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetRemoteDesktop before you can access a node by using the RDP file, you must create a user account on the node. +// This API can only be invoked on pools created with a cloud service configuration. For pools created with a virtual +// machine configuration, see the GetRemoteLoginSettings API. +// Parameters: +// poolID - the ID of the pool that contains the compute node. +// nodeID - the ID of the compute node for which you want to get the Remote Desktop Protocol file. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +func (client ComputeNodeClient) GetRemoteDesktop(ctx context.Context, poolID string, nodeID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result ReadCloser, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeClient.GetRemoteDesktop") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetRemoteDesktopPreparer(ctx, poolID, nodeID, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "GetRemoteDesktop", nil, "Failure preparing request") + return + } + + resp, err := client.GetRemoteDesktopSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "GetRemoteDesktop", resp, "Failure sending request") + return + } + + result, err = client.GetRemoteDesktopResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "GetRemoteDesktop", resp, "Failure responding to request") + } + + return +} + +// GetRemoteDesktopPreparer prepares the GetRemoteDesktop request. 
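A short sketch of Get with an OData $select projection, reusing the client and context from the sketch above; the property names in the projection are illustrative, and the context/fmt imports are assumed.

// printNodeState fetches one compute node, asking the service to project only a few
// properties via $select, and prints the deserialized ComputeNode model.
func printNodeState(ctx context.Context, client batch.ComputeNodeClient, poolID, nodeID string) error {
	node, err := client.Get(ctx, poolID, nodeID, "id,state,schedulingState", nil, nil, nil, nil)
	if err != nil {
		return err
	}
	fmt.Printf("%+v\n", node) // avoid assuming individual field names in this sketch
	return nil
}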
+func (client ComputeNodeClient) GetRemoteDesktopPreparer(ctx context.Context, poolID string, nodeID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "nodeId": autorest.Encode("path", nodeID), + "poolId": autorest.Encode("path", poolID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/rdp", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetRemoteDesktopSender sends the GetRemoteDesktop request. The method will close the +// http.Response Body if it receives an error. +func (client ComputeNodeClient) GetRemoteDesktopSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetRemoteDesktopResponder handles the response to the GetRemoteDesktop request. The method always +// closes the http.Response Body. +func (client ComputeNodeClient) GetRemoteDesktopResponder(resp *http.Response) (result ReadCloser, err error) { + result.Value = &resp.Body + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK)) + result.Response = autorest.Response{Response: resp} + return +} + +// GetRemoteLoginSettings before you can remotely login to a node using the remote login settings, you must create a +// user account on the node. This API can be invoked only on pools created with the virtual machine configuration +// property. For pools created with a cloud service configuration, see the GetRemoteDesktop API. +// Parameters: +// poolID - the ID of the pool that contains the compute node. +// nodeID - the ID of the compute node for which to obtain the remote login settings. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. 
Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +func (client ComputeNodeClient) GetRemoteLoginSettings(ctx context.Context, poolID string, nodeID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result ComputeNodeGetRemoteLoginSettingsResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeClient.GetRemoteLoginSettings") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetRemoteLoginSettingsPreparer(ctx, poolID, nodeID, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "GetRemoteLoginSettings", nil, "Failure preparing request") + return + } + + resp, err := client.GetRemoteLoginSettingsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "GetRemoteLoginSettings", resp, "Failure sending request") + return + } + + result, err = client.GetRemoteLoginSettingsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "GetRemoteLoginSettings", resp, "Failure responding to request") + } + + return +} + +// GetRemoteLoginSettingsPreparer prepares the GetRemoteLoginSettings request. +func (client ComputeNodeClient) GetRemoteLoginSettingsPreparer(ctx context.Context, poolID string, nodeID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "nodeId": autorest.Encode("path", nodeID), + "poolId": autorest.Encode("path", poolID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/remoteloginsettings", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetRemoteLoginSettingsSender sends the GetRemoteLoginSettings request. The method will close the +// http.Response Body if it receives an error. 
+func (client ComputeNodeClient) GetRemoteLoginSettingsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetRemoteLoginSettingsResponder handles the response to the GetRemoteLoginSettings request. The method always +// closes the http.Response Body. +func (client ComputeNodeClient) GetRemoteLoginSettingsResponder(resp *http.Response) (result ComputeNodeGetRemoteLoginSettingsResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List sends the list request. +// Parameters: +// poolID - the ID of the pool from which you want to list nodes. +// filter - an OData $filter clause. For more information on constructing this filter, see +// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. +// selectParameter - an OData $select clause. +// maxResults - the maximum number of items to return in the response. A maximum of 1000 nodes can be returned. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. 
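The two remote-access operations above are split by pool type: GetRemoteDesktop streams an RDP file for cloud service configuration pools, while GetRemoteLoginSettings returns the login endpoint for virtual machine configuration pools. A hedged sketch of saving the RDP file follows, relying on the fact (visible in the responder above) that ReadCloser.Value points at the raw response body, which the caller must close; the io and os imports are assumed.

// saveRDPFile downloads the node's RDP file and writes it to path.
func saveRDPFile(ctx context.Context, client batch.ComputeNodeClient, poolID, nodeID, path string) error {
	rdp, err := client.GetRemoteDesktop(ctx, poolID, nodeID, nil, nil, nil, nil)
	if err != nil {
		return err
	}
	defer (*rdp.Value).Close() // Value is a *io.ReadCloser wrapping the response body

	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(f, *rdp.Value)
	return err
}

For a virtual machine configuration pool the equivalent call is client.GetRemoteLoginSettings(ctx, poolID, nodeID, nil, nil, nil, nil), whose result can be inspected with fmt.Printf("%+v", ...) without assuming field names.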
+func (client ComputeNodeClient) List(ctx context.Context, poolID string, filter string, selectParameter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result ComputeNodeListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeClient.List") + defer func() { + sc := -1 + if result.cnlr.Response.Response != nil { + sc = result.cnlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxResults, + Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil}, + {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("batch.ComputeNodeClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, poolID, filter, selectParameter, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.cnlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "List", resp, "Failure sending request") + return + } + + result.cnlr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client ComputeNodeClient) ListPreparer(ctx context.Context, poolID string, filter string, selectParameter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "poolId": autorest.Encode("path", poolID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + if maxResults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxResults) + } else { + queryParameters["maxresults"] = autorest.Encode("query", 1000) + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/pools/{poolId}/nodes", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ComputeNodeClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ComputeNodeClient) ListResponder(resp *http.Response) (result ComputeNodeListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client ComputeNodeClient) listNextResults(ctx context.Context, lastResults ComputeNodeListResult) (result ComputeNodeListResult, err error) { + req, err := lastResults.computeNodeListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client ComputeNodeClient) ListComplete(ctx context.Context, poolID string, filter string, selectParameter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result ComputeNodeListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, poolID, filter, selectParameter, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + return +} + +// Reboot you can restart a node only if it is in an idle or running state. +// Parameters: +// poolID - the ID of the pool that contains the compute node. +// nodeID - the ID of the compute node that you want to restart. +// nodeRebootParameter - the parameters for the request. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. 
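A paging sketch. List returns a ComputeNodeListResultPage and ListComplete an iterator that crosses pages automatically; the NotDone, Values and NextWithContext methods used below follow the standard pattern for generated page types in this SDK and are assumed rather than shown in the hunk above. The filter and $select values are illustrative, and maxResults stays inside the validated 1 to 1000 range.

// countIdleCandidates pages through every node in a pool that matches the filter
// and returns how many were seen.
func countIdleCandidates(ctx context.Context, client batch.ComputeNodeClient, poolID string) (int, error) {
	maxResults := int32(1000)
	page, err := client.List(ctx, poolID, "state eq 'idle'", "id,state", &maxResults, nil, nil, nil, nil)
	if err != nil {
		return 0, err
	}
	total := 0
	for page.NotDone() {
		total += len(page.Values())
		if err := page.NextWithContext(ctx); err != nil {
			return total, err
		}
	}
	return total, nil
}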
+func (client ComputeNodeClient) Reboot(ctx context.Context, poolID string, nodeID string, nodeRebootParameter *NodeRebootParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeClient.Reboot") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.RebootPreparer(ctx, poolID, nodeID, nodeRebootParameter, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "Reboot", nil, "Failure preparing request") + return + } + + resp, err := client.RebootSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "Reboot", resp, "Failure sending request") + return + } + + result, err = client.RebootResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "Reboot", resp, "Failure responding to request") + } + + return +} + +// RebootPreparer prepares the Reboot request. +func (client ComputeNodeClient) RebootPreparer(ctx context.Context, poolID string, nodeID string, nodeRebootParameter *NodeRebootParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "nodeId": autorest.Encode("path", nodeID), + "poolId": autorest.Encode("path", poolID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/reboot", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if nodeRebootParameter != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(nodeRebootParameter)) + } + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RebootSender sends the Reboot request. The method will close the +// http.Response Body if it receives an error. 
+func (client ComputeNodeClient) RebootSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// RebootResponder handles the response to the Reboot request. The method always +// closes the http.Response Body. +func (client ComputeNodeClient) RebootResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Reimage you can reinstall the operating system on a node only if it is in an idle or running state. This API can be +// invoked only on pools created with the cloud service configuration property. +// Parameters: +// poolID - the ID of the pool that contains the compute node. +// nodeID - the ID of the compute node that you want to restart. +// nodeReimageParameter - the parameters for the request. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +func (client ComputeNodeClient) Reimage(ctx context.Context, poolID string, nodeID string, nodeReimageParameter *NodeReimageParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeClient.Reimage") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ReimagePreparer(ctx, poolID, nodeID, nodeReimageParameter, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "Reimage", nil, "Failure preparing request") + return + } + + resp, err := client.ReimageSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "Reimage", resp, "Failure sending request") + return + } + + result, err = client.ReimageResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "Reimage", resp, "Failure responding to request") + } + + return +} + +// ReimagePreparer prepares the Reimage request. 
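A combined sketch for the two restart-style operations documented above. Passing nil for the optional NodeRebootParameter or NodeReimageParameter leaves the treatment of running tasks to the service default, and Reimage is limited to cloud service configuration pools.

// recycleNode reboots a node, or reimages it when reinstallOS is true (cloud service
// configuration pools only). Both operations return 200 or 202 on success; failures
// surface through err.
func recycleNode(ctx context.Context, client batch.ComputeNodeClient, poolID, nodeID string, reinstallOS bool) error {
	if reinstallOS {
		_, err := client.Reimage(ctx, poolID, nodeID, nil, nil, nil, nil, nil)
		return err
	}
	_, err := client.Reboot(ctx, poolID, nodeID, nil, nil, nil, nil, nil)
	return err
}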
+func (client ComputeNodeClient) ReimagePreparer(ctx context.Context, poolID string, nodeID string, nodeReimageParameter *NodeReimageParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+	urlParameters := map[string]interface{}{
+		"batchUrl": client.BatchURL,
+	}
+
+	pathParameters := map[string]interface{}{
+		"nodeId": autorest.Encode("path", nodeID),
+		"poolId": autorest.Encode("path", poolID),
+	}
+
+	const APIVersion = "2019-06-01.9.0"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+	if timeout != nil {
+		queryParameters["timeout"] = autorest.Encode("query", *timeout)
+	} else {
+		queryParameters["timeout"] = autorest.Encode("query", 30)
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+		autorest.AsPost(),
+		autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+		autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/reimage", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	if nodeReimageParameter != nil {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithJSON(nodeReimageParameter))
+	}
+	if clientRequestID != nil {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+	}
+	if returnClientRequestID != nil {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+	} else {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithHeader("return-client-request-id", autorest.String(false)))
+	}
+	if ocpDate != nil {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+	}
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ReimageSender sends the Reimage request. The method will close the
+// http.Response Body if it receives an error.
+func (client ComputeNodeClient) ReimageSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req,
+		autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+}
+
+// ReimageResponder handles the response to the Reimage request. The method always
+// closes the http.Response Body.
+func (client ComputeNodeClient) ReimageResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// UpdateUser this operation replaces all of the updatable properties of the user account. For example, if the
+// expiryTime element is not specified, the current value is replaced with the default value, not left unmodified.
+// You can update a user account on a node only when the node is in the idle or running state.
+// Parameters:
+// poolID - the ID of the pool that contains the compute node.
+// nodeID - the ID of the compute node on which you want to update a user account.
+// userName - the name of the user account to update.
+// nodeUpdateUserParameter - the parameters for the request.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +func (client ComputeNodeClient) UpdateUser(ctx context.Context, poolID string, nodeID string, userName string, nodeUpdateUserParameter NodeUpdateUserParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeClient.UpdateUser") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdateUserPreparer(ctx, poolID, nodeID, userName, nodeUpdateUserParameter, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "UpdateUser", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateUserSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "UpdateUser", resp, "Failure sending request") + return + } + + result, err = client.UpdateUserResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "UpdateUser", resp, "Failure responding to request") + } + + return +} + +// UpdateUserPreparer prepares the UpdateUser request. +func (client ComputeNodeClient) UpdateUserPreparer(ctx context.Context, poolID string, nodeID string, userName string, nodeUpdateUserParameter NodeUpdateUserParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "nodeId": autorest.Encode("path", nodeID), + "poolId": autorest.Encode("path", poolID), + "userName": autorest.Encode("path", userName), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"), + autorest.AsPut(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/users/{userName}", pathParameters), + autorest.WithJSON(nodeUpdateUserParameter), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, 
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateUserSender sends the UpdateUser request. The method will close the +// http.Response Body if it receives an error. +func (client ComputeNodeClient) UpdateUserSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// UpdateUserResponder handles the response to the UpdateUser request. The method always +// closes the http.Response Body. +func (client ComputeNodeClient) UpdateUserResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// UploadBatchServiceLogs this is for gathering Azure Batch service log files in an automated fashion from nodes if you +// are experiencing an error and wish to escalate to Azure support. The Azure Batch service log files should be shared +// with Azure support to aid in debugging issues with the Batch service. +// Parameters: +// poolID - the ID of the pool that contains the compute node. +// nodeID - the ID of the compute node from which you want to upload the Azure Batch service log files. +// uploadBatchServiceLogsConfiguration - the Azure Batch service log files upload configuration. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. 
+func (client ComputeNodeClient) UploadBatchServiceLogs(ctx context.Context, poolID string, nodeID string, uploadBatchServiceLogsConfiguration UploadBatchServiceLogsConfiguration, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result UploadBatchServiceLogsResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeClient.UploadBatchServiceLogs") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: uploadBatchServiceLogsConfiguration, + Constraints: []validation.Constraint{{Target: "uploadBatchServiceLogsConfiguration.ContainerURL", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "uploadBatchServiceLogsConfiguration.StartTime", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("batch.ComputeNodeClient", "UploadBatchServiceLogs", err.Error()) + } + + req, err := client.UploadBatchServiceLogsPreparer(ctx, poolID, nodeID, uploadBatchServiceLogsConfiguration, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "UploadBatchServiceLogs", nil, "Failure preparing request") + return + } + + resp, err := client.UploadBatchServiceLogsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "UploadBatchServiceLogs", resp, "Failure sending request") + return + } + + result, err = client.UploadBatchServiceLogsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "UploadBatchServiceLogs", resp, "Failure responding to request") + } + + return +} + +// UploadBatchServiceLogsPreparer prepares the UploadBatchServiceLogs request. 
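A sketch covering UpdateUser and UploadBatchServiceLogs. Because UpdateUser is a full replacement, an empty NodeUpdateUserParameter resets every updatable property of the user to its default, which is exactly the behavior the UpdateUser comment warns about; UploadBatchServiceLogs needs at least the ContainerURL and StartTime fields that the validation above enforces. The pointer helpers (to.StringPtr from github.com/Azure/go-autorest/autorest/to, date.Time from github.com/Azure/go-autorest/autorest/date) and the exact field types are assumptions based on the usual generated model shapes; URLs and IDs are placeholders.

// resetUserAndCollectLogs resets a node user account to its defaults and then asks the
// node to upload the last two hours of Batch service logs to a storage container.
func resetUserAndCollectLogs(ctx context.Context, client batch.ComputeNodeClient, poolID, nodeID string) error {
	// Empty parameter: every updatable property is replaced with its default value,
	// because UpdateUser performs a full replacement rather than a patch.
	if _, err := client.UpdateUser(ctx, poolID, nodeID, "admin-user", batch.NodeUpdateUserParameter{}, nil, nil, nil, nil); err != nil {
		return err
	}

	cfg := batch.UploadBatchServiceLogsConfiguration{
		// Placeholder SAS URL for a writable blob container (assumed *string field).
		ContainerURL: to.StringPtr("https://mystorage.blob.core.windows.net/batchlogs?sv=...&sig=..."),
		// Assumed *date.Time field; only logs produced after this time are requested.
		StartTime: &date.Time{Time: time.Now().Add(-2 * time.Hour)},
	}
	result, err := client.UploadBatchServiceLogs(ctx, poolID, nodeID, cfg, nil, nil, nil, nil)
	if err != nil {
		return err
	}
	fmt.Printf("%+v\n", result) // the service's description of the requested upload
	return nil
}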
+func (client ComputeNodeClient) UploadBatchServiceLogsPreparer(ctx context.Context, poolID string, nodeID string, uploadBatchServiceLogsConfiguration UploadBatchServiceLogsConfiguration, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "nodeId": autorest.Encode("path", nodeID), + "poolId": autorest.Encode("path", poolID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/uploadbatchservicelogs", pathParameters), + autorest.WithJSON(uploadBatchServiceLogsConfiguration), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UploadBatchServiceLogsSender sends the UploadBatchServiceLogs request. The method will close the +// http.Response Body if it receives an error. +func (client ComputeNodeClient) UploadBatchServiceLogsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// UploadBatchServiceLogsResponder handles the response to the UploadBatchServiceLogs request. The method always +// closes the http.Response Body. +func (client ComputeNodeClient) UploadBatchServiceLogsResponder(resp *http.Response) (result UploadBatchServiceLogsResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/services/batch/2019-06-01.9.0/batch/file.go b/services/batch/2019-06-01.9.0/batch/file.go new file mode 100644 index 000000000000..79926bf74411 --- /dev/null +++ b/services/batch/2019-06-01.9.0/batch/file.go @@ -0,0 +1,1102 @@ +package batch + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+	"context"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/date"
+	"github.com/Azure/go-autorest/autorest/validation"
+	"github.com/Azure/go-autorest/tracing"
+	"github.com/satori/go.uuid"
+	"net/http"
+)
+
+// FileClient is a client for issuing REST requests to the Azure Batch service.
+type FileClient struct {
+	BaseClient
+}
+
+// NewFileClient creates an instance of the FileClient client.
+func NewFileClient(batchURL string) FileClient {
+	return FileClient{New(batchURL)}
+}
+
+// DeleteFromComputeNode sends the delete from compute node request.
+// Parameters:
+// poolID - the ID of the pool that contains the compute node.
+// nodeID - the ID of the compute node from which you want to delete the file.
+// filePath - the path to the file or directory that you want to delete.
+// recursive - whether to delete children of a directory. If the filePath parameter represents a directory
+// instead of a file, you can set recursive to true to delete the directory and all of the files and
+// subdirectories in it. If recursive is false then the directory must be empty or deletion will fail.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
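For orientation, a minimal construction sketch (editorial, not part of the patch): NewFileClient only wires the Batch account URL into the shared BaseClient, so the caller still has to attach an autorest.Authorizer before issuing requests. The delete and download sketches further below reuse a client built this way.

// newAuthorizedFileClient builds a FileClient for a single Batch account endpoint,
// e.g. "https://myaccount.westus2.batch.azure.com". Obtaining the Authorizer is out
// of scope for this sketch.
func newAuthorizedFileClient(batchURL string, auth autorest.Authorizer) batch.FileClient {
	fc := batch.NewFileClient(batchURL)
	fc.Authorizer = auth // field promoted from the embedded autorest.Client
	return fc
}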
+func (client FileClient) DeleteFromComputeNode(ctx context.Context, poolID string, nodeID string, filePath string, recursive *bool, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileClient.DeleteFromComputeNode") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeleteFromComputeNodePreparer(ctx, poolID, nodeID, filePath, recursive, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.FileClient", "DeleteFromComputeNode", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteFromComputeNodeSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.FileClient", "DeleteFromComputeNode", resp, "Failure sending request") + return + } + + result, err = client.DeleteFromComputeNodeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.FileClient", "DeleteFromComputeNode", resp, "Failure responding to request") + } + + return +} + +// DeleteFromComputeNodePreparer prepares the DeleteFromComputeNode request. +func (client FileClient) DeleteFromComputeNodePreparer(ctx context.Context, poolID string, nodeID string, filePath string, recursive *bool, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "filePath": autorest.Encode("path", filePath), + "nodeId": autorest.Encode("path", nodeID), + "poolId": autorest.Encode("path", poolID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if recursive != nil { + queryParameters["recursive"] = autorest.Encode("query", *recursive) + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/files/{filePath}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteFromComputeNodeSender sends the DeleteFromComputeNode request. The method will close the +// http.Response Body if it receives an error. 
+func (client FileClient) DeleteFromComputeNodeSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// DeleteFromComputeNodeResponder handles the response to the DeleteFromComputeNode request. The method always +// closes the http.Response Body. +func (client FileClient) DeleteFromComputeNodeResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteFromTask sends the delete from task request. +// Parameters: +// jobID - the ID of the job that contains the task. +// taskID - the ID of the task whose file you want to delete. +// filePath - the path to the task file or directory that you want to delete. +// recursive - whether to delete children of a directory. If the filePath parameter represents a directory +// instead of a file, you can set recursive to true to delete the directory and all of the files and +// subdirectories in it. If recursive is false then the directory must be empty or deletion will fail. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +func (client FileClient) DeleteFromTask(ctx context.Context, jobID string, taskID string, filePath string, recursive *bool, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileClient.DeleteFromTask") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeleteFromTaskPreparer(ctx, jobID, taskID, filePath, recursive, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.FileClient", "DeleteFromTask", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteFromTaskSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.FileClient", "DeleteFromTask", resp, "Failure sending request") + return + } + + result, err = client.DeleteFromTaskResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.FileClient", "DeleteFromTask", resp, "Failure responding to request") + } + + return +} + +// DeleteFromTaskPreparer prepares the DeleteFromTask request. 
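A sketch of the two delete operations using a client built as in the construction sketch above. The recursive flag matters only when filePath names a directory: true removes the whole tree, while false or nil (the service default) requires the directory to be empty. IDs and paths are placeholders.

// cleanupOutputs deletes a completed task's working directory tree and then a single
// log file from a compute node.
func cleanupOutputs(ctx context.Context, fc batch.FileClient) error {
	recursive := true
	// Remove the task's working directory and everything under it.
	if _, err := fc.DeleteFromTask(ctx, "job-1", "task-1", "wd", &recursive, nil, nil, nil, nil); err != nil {
		return err
	}
	// Delete one file from the node; recursive is irrelevant for plain files.
	_, err := fc.DeleteFromComputeNode(ctx, "mypool", "tvm-123", "startup/stdout.txt", nil, nil, nil, nil, nil)
	return err
}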
+func (client FileClient) DeleteFromTaskPreparer(ctx context.Context, jobID string, taskID string, filePath string, recursive *bool, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "filePath": autorest.Encode("path", filePath), + "jobId": autorest.Encode("path", jobID), + "taskId": autorest.Encode("path", taskID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if recursive != nil { + queryParameters["recursive"] = autorest.Encode("query", *recursive) + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobs/{jobId}/tasks/{taskId}/files/{filePath}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteFromTaskSender sends the DeleteFromTask request. The method will close the +// http.Response Body if it receives an error. +func (client FileClient) DeleteFromTaskSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// DeleteFromTaskResponder handles the response to the DeleteFromTask request. The method always +// closes the http.Response Body. +func (client FileClient) DeleteFromTaskResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// GetFromComputeNode returns the content of the specified compute node file. +// Parameters: +// poolID - the ID of the pool that contains the compute node. +// nodeID - the ID of the compute node that contains the file. +// filePath - the path to the compute node file that you want to get the content of. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. 
+// ocpRange - the byte range to be retrieved. The default is to retrieve the entire file. The format is +// bytes=startRange-endRange. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. +func (client FileClient) GetFromComputeNode(ctx context.Context, poolID string, nodeID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ocpRange string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result ReadCloser, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileClient.GetFromComputeNode") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetFromComputeNodePreparer(ctx, poolID, nodeID, filePath, timeout, clientRequestID, returnClientRequestID, ocpDate, ocpRange, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.FileClient", "GetFromComputeNode", nil, "Failure preparing request") + return + } + + resp, err := client.GetFromComputeNodeSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.FileClient", "GetFromComputeNode", resp, "Failure sending request") + return + } + + result, err = client.GetFromComputeNodeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.FileClient", "GetFromComputeNode", resp, "Failure responding to request") + } + + return +} + +// GetFromComputeNodePreparer prepares the GetFromComputeNode request. 
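+//
+// A sketch of a ranged download (illustrative only): the pool ID "pool-1", node ID
+// "tvm-123", file path and the fc client from the earlier sketch are placeholder
+// assumptions. Per GetFromComputeNodeResponder below, result.Value points at the raw
+// response body, so the caller must close it after reading.
+//
+//    res, err := fc.GetFromComputeNode(ctx, "pool-1", "tvm-123", "startup/stdout.txt",
+//        nil, nil, nil, nil, "bytes=0-1023", nil, nil) // first 1 KiB only
+//    if err == nil && res.Value != nil {
+//        defer (*res.Value).Close()
+//        _, _ = io.Copy(os.Stdout, *res.Value) // stream the partial content
+//    }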
+func (client FileClient) GetFromComputeNodePreparer(ctx context.Context, poolID string, nodeID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ocpRange string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "filePath": autorest.Encode("path", filePath), + "nodeId": autorest.Encode("path", nodeID), + "poolId": autorest.Encode("path", poolID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/files/{filePath}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ocpRange) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-range", autorest.String(ocpRange))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetFromComputeNodeSender sends the GetFromComputeNode request. The method will close the +// http.Response Body if it receives an error. +func (client FileClient) GetFromComputeNodeSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetFromComputeNodeResponder handles the response to the GetFromComputeNode request. The method always +// closes the http.Response Body. +func (client FileClient) GetFromComputeNodeResponder(resp *http.Response) (result ReadCloser, err error) { + result.Value = &resp.Body + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK)) + result.Response = autorest.Response{Response: resp} + return +} + +// GetFromTask returns the content of the specified task file. +// Parameters: +// jobID - the ID of the job that contains the task. +// taskID - the ID of the task whose file you want to retrieve. +// filePath - the path to the task file that you want to get the content of. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. 
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ocpRange - the byte range to be retrieved. The default is to retrieve the entire file. The format is +// bytes=startRange-endRange. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. +func (client FileClient) GetFromTask(ctx context.Context, jobID string, taskID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ocpRange string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result ReadCloser, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileClient.GetFromTask") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetFromTaskPreparer(ctx, jobID, taskID, filePath, timeout, clientRequestID, returnClientRequestID, ocpDate, ocpRange, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.FileClient", "GetFromTask", nil, "Failure preparing request") + return + } + + resp, err := client.GetFromTaskSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.FileClient", "GetFromTask", resp, "Failure sending request") + return + } + + result, err = client.GetFromTaskResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.FileClient", "GetFromTask", resp, "Failure responding to request") + } + + return +} + +// GetFromTaskPreparer prepares the GetFromTask request. 
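+//
+// A sketch of a conditional fetch of a task file (illustrative; the IDs, the lastFetched
+// timestamp and the fc client from the earlier sketches are placeholder assumptions).
+// Because GetFromTaskResponder below treats anything other than 200 OK as an error, a
+// 304 Not Modified response surfaces through err rather than through the result:
+//
+//    since := date.TimeRFC1123{Time: lastFetched} // time.Time captured on a previous call
+//    res, err := fc.GetFromTask(ctx, "job-1", "task-1", "stdout.txt",
+//        nil, nil, nil, nil, "", &since, nil)
+//    if err != nil {
+//        // covers the not-modified case as well as genuine failures
+//    } else if res.Value != nil {
+//        defer (*res.Value).Close()
+//        // consume *res.Value
+//    }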
+func (client FileClient) GetFromTaskPreparer(ctx context.Context, jobID string, taskID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ocpRange string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "filePath": autorest.Encode("path", filePath), + "jobId": autorest.Encode("path", jobID), + "taskId": autorest.Encode("path", taskID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobs/{jobId}/tasks/{taskId}/files/{filePath}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ocpRange) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-range", autorest.String(ocpRange))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetFromTaskSender sends the GetFromTask request. The method will close the +// http.Response Body if it receives an error. +func (client FileClient) GetFromTaskSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetFromTaskResponder handles the response to the GetFromTask request. The method always +// closes the http.Response Body. +func (client FileClient) GetFromTaskResponder(resp *http.Response) (result ReadCloser, err error) { + result.Value = &resp.Body + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK)) + result.Response = autorest.Response{Response: resp} + return +} + +// GetPropertiesFromComputeNode gets the properties of the specified compute node file. +// Parameters: +// poolID - the ID of the pool that contains the compute node. +// nodeID - the ID of the compute node that contains the file. +// filePath - the path to the compute node file that you want to get the properties of. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. 
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. +func (client FileClient) GetPropertiesFromComputeNode(ctx context.Context, poolID string, nodeID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileClient.GetPropertiesFromComputeNode") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPropertiesFromComputeNodePreparer(ctx, poolID, nodeID, filePath, timeout, clientRequestID, returnClientRequestID, ocpDate, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.FileClient", "GetPropertiesFromComputeNode", nil, "Failure preparing request") + return + } + + resp, err := client.GetPropertiesFromComputeNodeSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.FileClient", "GetPropertiesFromComputeNode", resp, "Failure sending request") + return + } + + result, err = client.GetPropertiesFromComputeNodeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.FileClient", "GetPropertiesFromComputeNode", resp, "Failure responding to request") + } + + return +} + +// GetPropertiesFromComputeNodePreparer prepares the GetPropertiesFromComputeNode request. 
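+//
+// Because this operation is prepared as a HEAD request (see the preparer below), the
+// file's properties come back only as response headers on the autorest.Response. A brief
+// sketch (illustrative; the IDs and the fc client are placeholder assumptions, and only
+// the standard HTTP headers are shown):
+//
+//    res, err := fc.GetPropertiesFromComputeNode(ctx, "pool-1", "tvm-123", "startup/stdout.txt",
+//        nil, nil, nil, nil, nil, nil)
+//    if err == nil {
+//        size := res.Header.Get("Content-Length")
+//        modified := res.Header.Get("Last-Modified")
+//        _, _ = size, modified // service-specific ocp-* headers carry further properties
+//    }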
+func (client FileClient) GetPropertiesFromComputeNodePreparer(ctx context.Context, poolID string, nodeID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "filePath": autorest.Encode("path", filePath), + "nodeId": autorest.Encode("path", nodeID), + "poolId": autorest.Encode("path", poolID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsHead(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/files/{filePath}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetPropertiesFromComputeNodeSender sends the GetPropertiesFromComputeNode request. The method will close the +// http.Response Body if it receives an error. +func (client FileClient) GetPropertiesFromComputeNodeSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetPropertiesFromComputeNodeResponder handles the response to the GetPropertiesFromComputeNode request. The method always +// closes the http.Response Body. +func (client FileClient) GetPropertiesFromComputeNodeResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// GetPropertiesFromTask gets the properties of the specified task file. +// Parameters: +// jobID - the ID of the job that contains the task. +// taskID - the ID of the task whose file you want to get the properties of. +// filePath - the path to the task file that you want to get the properties of. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. 
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. +func (client FileClient) GetPropertiesFromTask(ctx context.Context, jobID string, taskID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileClient.GetPropertiesFromTask") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPropertiesFromTaskPreparer(ctx, jobID, taskID, filePath, timeout, clientRequestID, returnClientRequestID, ocpDate, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.FileClient", "GetPropertiesFromTask", nil, "Failure preparing request") + return + } + + resp, err := client.GetPropertiesFromTaskSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.FileClient", "GetPropertiesFromTask", resp, "Failure sending request") + return + } + + result, err = client.GetPropertiesFromTaskResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.FileClient", "GetPropertiesFromTask", resp, "Failure responding to request") + } + + return +} + +// GetPropertiesFromTaskPreparer prepares the GetPropertiesFromTask request. 
+func (client FileClient) GetPropertiesFromTaskPreparer(ctx context.Context, jobID string, taskID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "filePath": autorest.Encode("path", filePath), + "jobId": autorest.Encode("path", jobID), + "taskId": autorest.Encode("path", taskID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsHead(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobs/{jobId}/tasks/{taskId}/files/{filePath}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetPropertiesFromTaskSender sends the GetPropertiesFromTask request. The method will close the +// http.Response Body if it receives an error. +func (client FileClient) GetPropertiesFromTaskSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetPropertiesFromTaskResponder handles the response to the GetPropertiesFromTask request. The method always +// closes the http.Response Body. +func (client FileClient) GetPropertiesFromTaskResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// ListFromComputeNode sends the list from compute node request. +// Parameters: +// poolID - the ID of the pool that contains the compute node. +// nodeID - the ID of the compute node whose files you want to list. +// filter - an OData $filter clause. For more information on constructing this filter, see +// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. +// recursive - whether to list children of a directory. +// maxResults - the maximum number of items to return in the response. 
A maximum of 1000 files can be returned. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +func (client FileClient) ListFromComputeNode(ctx context.Context, poolID string, nodeID string, filter string, recursive *bool, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result NodeFileListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileClient.ListFromComputeNode") + defer func() { + sc := -1 + if result.nflr.Response.Response != nil { + sc = result.nflr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxResults, + Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil}, + {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("batch.FileClient", "ListFromComputeNode", err.Error()) + } + + result.fn = client.listFromComputeNodeNextResults + req, err := client.ListFromComputeNodePreparer(ctx, poolID, nodeID, filter, recursive, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.FileClient", "ListFromComputeNode", nil, "Failure preparing request") + return + } + + resp, err := client.ListFromComputeNodeSender(req) + if err != nil { + result.nflr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.FileClient", "ListFromComputeNode", resp, "Failure sending request") + return + } + + result.nflr, err = client.ListFromComputeNodeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.FileClient", "ListFromComputeNode", resp, "Failure responding to request") + } + + return +} + +// ListFromComputeNodePreparer prepares the ListFromComputeNode request. 
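+//
+// A paging sketch for ListFromComputeNode (illustrative; the OData filter, the IDs and
+// the fc client are placeholder assumptions, and the NotDone/Values/NextWithContext
+// surface is the usual shape of the generated NodeFileListResultPage):
+//
+//    page, err := fc.ListFromComputeNode(ctx, "pool-1", "tvm-123",
+//        "startswith(name,'startup')", nil, nil, nil, nil, nil, nil)
+//    for err == nil && page.NotDone() {
+//        for _, f := range page.Values() {
+//            fmt.Println(*f.Name) // NodeFile.Name is a *string
+//        }
+//        err = page.NextWithContext(ctx)
+//    }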
+func (client FileClient) ListFromComputeNodePreparer(ctx context.Context, poolID string, nodeID string, filter string, recursive *bool, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "nodeId": autorest.Encode("path", nodeID), + "poolId": autorest.Encode("path", poolID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if recursive != nil { + queryParameters["recursive"] = autorest.Encode("query", *recursive) + } + if maxResults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxResults) + } else { + queryParameters["maxresults"] = autorest.Encode("query", 1000) + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/files", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListFromComputeNodeSender sends the ListFromComputeNode request. The method will close the +// http.Response Body if it receives an error. +func (client FileClient) ListFromComputeNodeSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ListFromComputeNodeResponder handles the response to the ListFromComputeNode request. The method always +// closes the http.Response Body. +func (client FileClient) ListFromComputeNodeResponder(resp *http.Response) (result NodeFileListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listFromComputeNodeNextResults retrieves the next set of results, if any. 
+func (client FileClient) listFromComputeNodeNextResults(ctx context.Context, lastResults NodeFileListResult) (result NodeFileListResult, err error) { + req, err := lastResults.nodeFileListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.FileClient", "listFromComputeNodeNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListFromComputeNodeSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "batch.FileClient", "listFromComputeNodeNextResults", resp, "Failure sending next results request") + } + result, err = client.ListFromComputeNodeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.FileClient", "listFromComputeNodeNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListFromComputeNodeComplete enumerates all values, automatically crossing page boundaries as required. +func (client FileClient) ListFromComputeNodeComplete(ctx context.Context, poolID string, nodeID string, filter string, recursive *bool, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result NodeFileListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileClient.ListFromComputeNode") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListFromComputeNode(ctx, poolID, nodeID, filter, recursive, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + return +} + +// ListFromTask sends the list from task request. +// Parameters: +// jobID - the ID of the job that contains the task. +// taskID - the ID of the task whose files you want to list. +// filter - an OData $filter clause. For more information on constructing this filter, see +// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. +// recursive - whether to list children of the task directory. This parameter can be used in combination with +// the filter parameter to list specific type of files. +// maxResults - the maximum number of items to return in the response. A maximum of 1000 files can be returned. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. 
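+//
+// For most callers the iterator variant further down, ListFromTaskComplete, is the
+// simplest way to walk every file across page boundaries. A sketch (illustrative; the
+// IDs and the fc client are placeholder assumptions, and the NotDone/Value/NextWithContext
+// iterator surface follows the usual generated pattern):
+//
+//    it, err := fc.ListFromTaskComplete(ctx, "job-1", "task-1", "", nil, nil, nil, nil, nil, nil)
+//    for err == nil && it.NotDone() {
+//        f := it.Value() // a NodeFile
+//        fmt.Println(*f.Name)
+//        err = it.NextWithContext(ctx)
+//    }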
+func (client FileClient) ListFromTask(ctx context.Context, jobID string, taskID string, filter string, recursive *bool, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result NodeFileListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileClient.ListFromTask") + defer func() { + sc := -1 + if result.nflr.Response.Response != nil { + sc = result.nflr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxResults, + Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil}, + {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("batch.FileClient", "ListFromTask", err.Error()) + } + + result.fn = client.listFromTaskNextResults + req, err := client.ListFromTaskPreparer(ctx, jobID, taskID, filter, recursive, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.FileClient", "ListFromTask", nil, "Failure preparing request") + return + } + + resp, err := client.ListFromTaskSender(req) + if err != nil { + result.nflr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.FileClient", "ListFromTask", resp, "Failure sending request") + return + } + + result.nflr, err = client.ListFromTaskResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.FileClient", "ListFromTask", resp, "Failure responding to request") + } + + return +} + +// ListFromTaskPreparer prepares the ListFromTask request. 
+func (client FileClient) ListFromTaskPreparer(ctx context.Context, jobID string, taskID string, filter string, recursive *bool, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobId": autorest.Encode("path", jobID), + "taskId": autorest.Encode("path", taskID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if recursive != nil { + queryParameters["recursive"] = autorest.Encode("query", *recursive) + } + if maxResults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxResults) + } else { + queryParameters["maxresults"] = autorest.Encode("query", 1000) + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobs/{jobId}/tasks/{taskId}/files", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListFromTaskSender sends the ListFromTask request. The method will close the +// http.Response Body if it receives an error. +func (client FileClient) ListFromTaskSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ListFromTaskResponder handles the response to the ListFromTask request. The method always +// closes the http.Response Body. +func (client FileClient) ListFromTaskResponder(resp *http.Response) (result NodeFileListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listFromTaskNextResults retrieves the next set of results, if any. 
+func (client FileClient) listFromTaskNextResults(ctx context.Context, lastResults NodeFileListResult) (result NodeFileListResult, err error) { + req, err := lastResults.nodeFileListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.FileClient", "listFromTaskNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListFromTaskSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "batch.FileClient", "listFromTaskNextResults", resp, "Failure sending next results request") + } + result, err = client.ListFromTaskResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.FileClient", "listFromTaskNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListFromTaskComplete enumerates all values, automatically crossing page boundaries as required. +func (client FileClient) ListFromTaskComplete(ctx context.Context, jobID string, taskID string, filter string, recursive *bool, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result NodeFileListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileClient.ListFromTask") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListFromTask(ctx, jobID, taskID, filter, recursive, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + return +} diff --git a/services/batch/2019-06-01.9.0/batch/job.go b/services/batch/2019-06-01.9.0/batch/job.go new file mode 100644 index 000000000000..02e65a4d44f6 --- /dev/null +++ b/services/batch/2019-06-01.9.0/batch/job.go @@ -0,0 +1,1924 @@ +package batch + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "github.com/satori/go.uuid" + "net/http" +) + +// JobClient is the a client for issuing REST requests to the Azure Batch service. +type JobClient struct { + BaseClient +} + +// NewJobClient creates an instance of the JobClient client. +func NewJobClient(batchURL string) JobClient { + return JobClient{New(batchURL)} +} + +// Add the Batch service supports two ways to control the work done as part of a job. In the first approach, the user +// specifies a Job Manager task. 
The Batch service launches this task when it is ready to start the job. The Job +// Manager task controls all other tasks that run under this job, by using the Task APIs. In the second approach, the +// user directly controls the execution of tasks under an active job, by using the Task APIs. Also note: when naming +// jobs, avoid including sensitive information such as user names or secret project names. This information may appear +// in telemetry logs accessible to Microsoft Support engineers. +// Parameters: +// job - the job to be added. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +func (client JobClient) Add(ctx context.Context, job JobAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.Add") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: job, + Constraints: []validation.Constraint{{Target: "job.ID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "job.JobManagerTask", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "job.JobManagerTask.ID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "job.JobManagerTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "job.JobManagerTask.ContainerSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "job.JobManagerTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "job.JobManagerTask.ContainerSettings.Registry", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "job.JobManagerTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "job.JobManagerTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}, + {Target: "job.JobPreparationTask", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "job.JobPreparationTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "job.JobPreparationTask.ContainerSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "job.JobPreparationTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "job.JobPreparationTask.ContainerSettings.Registry", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "job.JobPreparationTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "job.JobPreparationTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}, + {Target: "job.JobReleaseTask", Name: validation.Null, Rule: false, + Chain: 
[]validation.Constraint{{Target: "job.JobReleaseTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "job.JobReleaseTask.ContainerSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "job.JobReleaseTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "job.JobReleaseTask.ContainerSettings.Registry", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "job.JobReleaseTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "job.JobReleaseTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}, + {Target: "job.PoolInfo", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "job.PoolInfo.AutoPoolSpecification", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "job.PoolInfo.AutoPoolSpecification.Pool", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "job.PoolInfo.AutoPoolSpecification.Pool.VMSize", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "job.PoolInfo.AutoPoolSpecification.Pool.CloudServiceConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "job.PoolInfo.AutoPoolSpecification.Pool.CloudServiceConfiguration.OsFamily", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "job.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "job.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.ImageReference", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "job.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.NodeAgentSKUID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "job.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.ContainerConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "job.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.ContainerConfiguration.Type", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + {Target: "job.PoolInfo.AutoPoolSpecification.Pool.NetworkConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "job.PoolInfo.AutoPoolSpecification.Pool.NetworkConfiguration.EndpointConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "job.PoolInfo.AutoPoolSpecification.Pool.NetworkConfiguration.EndpointConfiguration.InboundNATPools", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + {Target: "job.PoolInfo.AutoPoolSpecification.Pool.StartTask", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "job.PoolInfo.AutoPoolSpecification.Pool.StartTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "job.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "job.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "job.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.Registry", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "job.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil}, + 
{Target: "job.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}, + }}, + }}, + }}, + {Target: "job.NetworkConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "job.NetworkConfiguration.SubnetID", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { + return result, validation.NewError("batch.JobClient", "Add", err.Error()) + } + + req, err := client.AddPreparer(ctx, job, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "Add", nil, "Failure preparing request") + return + } + + resp, err := client.AddSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.JobClient", "Add", resp, "Failure sending request") + return + } + + result, err = client.AddResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "Add", resp, "Failure responding to request") + } + + return +} + +// AddPreparer prepares the Add request. +func (client JobClient) AddPreparer(ctx context.Context, job JobAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPath("/jobs"), + autorest.WithJSON(job), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// AddSender sends the Add request. The method will close the +// http.Response Body if it receives an error. +func (client JobClient) AddSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// AddResponder handles the response to the Add request. The method always +// closes the http.Response Body. +func (client JobClient) AddResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete deleting a job also deletes all tasks that are part of that job, and all job statistics. 
This also overrides +// the retention period for task data; that is, if the job contains tasks which are still retained on compute nodes, +// the Batch service deletes those tasks' working directories and all their contents. When a Delete Job request is +// received, the Batch service sets the job to the deleting state. All update operations on a job that is in deleting +// state will fail with status code 409 (Conflict), with additional information indicating that the job is being +// deleted. +// Parameters: +// jobID - the ID of the job to delete. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. +func (client JobClient) Delete(ctx context.Context, jobID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, jobID, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.JobClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request.
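+//
+// A sketch of an ETag-guarded delete (illustrative; the account URL, authorizer, job ID
+// and the etag string, captured from an earlier Get of the job, are placeholder
+// assumptions). Passing the ETag through ifMatch asks the service to reject the delete
+// (typically 412 Precondition Failed) if the job has changed since it was read; pass
+// empty strings and nils to delete unconditionally.
+//
+//    jc := NewJobClient(accountURL)
+//    jc.Authorizer = authorizer
+//    _, err := jc.Delete(ctx, "job-1", nil, nil, nil, nil, etag, "", nil, nil)
+//    if err != nil {
+//        // handle the error
+//    }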
+func (client JobClient) DeletePreparer(ctx context.Context, jobID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobId": autorest.Encode("path", jobID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobs/{jobId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client JobClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client JobClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Disable the Batch Service immediately moves the job to the disabling state. Batch then uses the disableTasks +// parameter to determine what to do with the currently running tasks of the job. The job remains in the disabling +// state until the disable operation is completed and all tasks have been dealt with according to the disableTasks +// option; the job then moves to the disabled state. No new tasks are started under the job until it moves back to +// active state. 
If you try to disable a job that is in any state other than active, disabling, or disabled, the +// request fails with status code 409. +// Parameters: +// jobID - the ID of the job to disable. +// jobDisableParameter - the parameters for the request. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. +func (client JobClient) Disable(ctx context.Context, jobID string, jobDisableParameter JobDisableParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.Disable") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DisablePreparer(ctx, jobID, jobDisableParameter, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "Disable", nil, "Failure preparing request") + return + } + + resp, err := client.DisableSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.JobClient", "Disable", resp, "Failure sending request") + return + } + + result, err = client.DisableResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "Disable", resp, "Failure responding to request") + } + + return +} + +// DisablePreparer prepares the Disable request. 
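// Illustrative sketch (editorial, not part of the patch): disabling a job and requeueing its running
// tasks. Assumes the same package/imports as the first sketch above; DisableJobOptionRequeue is the
// constant this package is expected to generate for the REST "requeue" value (an assumption worth
// verifying against models.go in this version).
func disableJobExample(ctx context.Context, jobClient batch.JobClient) error {
	params := batch.JobDisableParameter{
		// The REST disableTasks options are requeue, terminate, or wait.
		DisableTasks: batch.DisableJobOptionRequeue,
	}
	// Optional headers and conditional parameters are left at their defaults.
	_, err := jobClient.Disable(ctx, "my-job", params, nil, nil, nil, nil, "", "", nil, nil)
	return err
}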
+func (client JobClient) DisablePreparer(ctx context.Context, jobID string, jobDisableParameter JobDisableParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+	urlParameters := map[string]interface{}{
+		"batchUrl": client.BatchURL,
+	}
+
+	pathParameters := map[string]interface{}{
+		"jobId": autorest.Encode("path", jobID),
+	}
+
+	const APIVersion = "2019-06-01.9.0"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+	if timeout != nil {
+		queryParameters["timeout"] = autorest.Encode("query", *timeout)
+	} else {
+		queryParameters["timeout"] = autorest.Encode("query", 30)
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+		autorest.AsPost(),
+		autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+		autorest.WithPathParameters("/jobs/{jobId}/disable", pathParameters),
+		autorest.WithJSON(jobDisableParameter),
+		autorest.WithQueryParameters(queryParameters))
+	if clientRequestID != nil {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+	}
+	if returnClientRequestID != nil {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+	} else {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithHeader("return-client-request-id", autorest.String(false)))
+	}
+	if ocpDate != nil {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+	}
+	if len(ifMatch) > 0 {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+	}
+	if len(ifNoneMatch) > 0 {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+	}
+	if ifModifiedSince != nil {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+	}
+	if ifUnmodifiedSince != nil {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+	}
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DisableSender sends the Disable request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobClient) DisableSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req,
+		autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+}
+
+// DisableResponder handles the response to the Disable request. The method always
+// closes the http.Response Body.
+func (client JobClient) DisableResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// Enable when you call this API, the Batch service sets a disabled job to the enabling state. After this operation
+// is completed, the job moves to the active state, and scheduling of new tasks under the job resumes.
The Batch +// service does not allow a task to remain in the active state for more than 180 days. Therefore, if you enable a job +// containing active tasks which were added more than 180 days ago, those tasks will not run. +// Parameters: +// jobID - the ID of the job to enable. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. +func (client JobClient) Enable(ctx context.Context, jobID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.Enable") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.EnablePreparer(ctx, jobID, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "Enable", nil, "Failure preparing request") + return + } + + resp, err := client.EnableSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.JobClient", "Enable", resp, "Failure sending request") + return + } + + result, err = client.EnableResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "Enable", resp, "Failure responding to request") + } + + return +} + +// EnablePreparer prepares the Enable request. 
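// Illustrative sketch (editorial, not part of the patch): re-enabling a disabled job so task scheduling
// resumes. Same assumed package/imports as the first sketch; note the caveat above about active tasks
// added more than 180 days ago not being run again.
func enableJobExample(ctx context.Context, jobClient batch.JobClient) error {
	_, err := jobClient.Enable(ctx, "my-job", nil, nil, nil, nil, "", "", nil, nil)
	return err
}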
+func (client JobClient) EnablePreparer(ctx context.Context, jobID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobId": autorest.Encode("path", jobID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobs/{jobId}/enable", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// EnableSender sends the Enable request. The method will close the +// http.Response Body if it receives an error. +func (client JobClient) EnableSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// EnableResponder handles the response to the Enable request. The method always +// closes the http.Response Body. +func (client JobClient) EnableResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get sends the get request. +// Parameters: +// jobID - the ID of the job. +// selectParameter - an OData $select clause. +// expand - an OData $expand clause. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. +func (client JobClient) Get(ctx context.Context, jobID string, selectParameter string, expand string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result CloudJob, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, jobID, selectParameter, expand, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.JobClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
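// Illustrative sketch (editorial, not part of the patch): fetching a job while trimming the payload with
// an OData $select clause and expanding statistics. The field names in the clauses follow the REST wire
// format and are examples, not an exhaustive list; same assumed package/imports as the first sketch.
func getJobExample(ctx context.Context, jobClient batch.JobClient) (batch.CloudJob, error) {
	return jobClient.Get(ctx, "my-job",
		"id,state,executionInfo", // $select
		"stats",                  // $expand
		nil, nil, nil, nil, "", "", nil, nil)
}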
+func (client JobClient) GetPreparer(ctx context.Context, jobID string, selectParameter string, expand string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobId": autorest.Encode("path", jobID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobs/{jobId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client JobClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client JobClient) GetResponder(resp *http.Response) (result CloudJob, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetAllLifetimeStatistics statistics are aggregated across all jobs that have ever existed in the account, from +// account creation to the last update time of the statistics. The statistics may not be immediately available. 
The +// Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. +// Parameters: +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +func (client JobClient) GetAllLifetimeStatistics(ctx context.Context, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result JobStatistics, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.GetAllLifetimeStatistics") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetAllLifetimeStatisticsPreparer(ctx, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "GetAllLifetimeStatistics", nil, "Failure preparing request") + return + } + + resp, err := client.GetAllLifetimeStatisticsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.JobClient", "GetAllLifetimeStatistics", resp, "Failure sending request") + return + } + + result, err = client.GetAllLifetimeStatisticsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "GetAllLifetimeStatistics", resp, "Failure responding to request") + } + + return +} + +// GetAllLifetimeStatisticsPreparer prepares the GetAllLifetimeStatistics request. +func (client JobClient) GetAllLifetimeStatisticsPreparer(ctx context.Context, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPath("/lifetimejobstats"), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetAllLifetimeStatisticsSender sends the GetAllLifetimeStatistics request. 
The method will close the +// http.Response Body if it receives an error. +func (client JobClient) GetAllLifetimeStatisticsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetAllLifetimeStatisticsResponder handles the response to the GetAllLifetimeStatistics request. The method always +// closes the http.Response Body. +func (client JobClient) GetAllLifetimeStatisticsResponder(resp *http.Response) (result JobStatistics, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetTaskCounts task counts provide a count of the tasks by active, running or completed task state, and a count of +// tasks which succeeded or failed. Tasks in the preparing state are counted as running. +// Parameters: +// jobID - the ID of the job. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +func (client JobClient) GetTaskCounts(ctx context.Context, jobID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result TaskCounts, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.GetTaskCounts") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetTaskCountsPreparer(ctx, jobID, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "GetTaskCounts", nil, "Failure preparing request") + return + } + + resp, err := client.GetTaskCountsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.JobClient", "GetTaskCounts", resp, "Failure sending request") + return + } + + result, err = client.GetTaskCountsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "GetTaskCounts", resp, "Failure responding to request") + } + + return +} + +// GetTaskCountsPreparer prepares the GetTaskCounts request. 
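// Illustrative sketch (editorial, not part of the patch): polling the per-state task counts for a job,
// which is much cheaper than listing every task. Printing the whole struct avoids assuming individual
// field names here; same assumed package/imports as the first sketch.
func printTaskCountsExample(ctx context.Context, jobClient batch.JobClient) error {
	counts, err := jobClient.GetTaskCounts(ctx, "my-job", nil, nil, nil, nil)
	if err != nil {
		return err
	}
	fmt.Printf("task counts for my-job: %+v\n", counts)
	return nil
}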
+func (client JobClient) GetTaskCountsPreparer(ctx context.Context, jobID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobId": autorest.Encode("path", jobID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobs/{jobId}/taskcounts", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetTaskCountsSender sends the GetTaskCounts request. The method will close the +// http.Response Body if it receives an error. +func (client JobClient) GetTaskCountsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetTaskCountsResponder handles the response to the GetTaskCounts request. The method always +// closes the http.Response Body. +func (client JobClient) GetTaskCountsResponder(resp *http.Response) (result TaskCounts, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List sends the list request. +// Parameters: +// filter - an OData $filter clause. For more information on constructing this filter, see +// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. +// selectParameter - an OData $select clause. +// expand - an OData $expand clause. +// maxResults - the maximum number of items to return in the response. A maximum of 1000 jobs can be returned. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. 
+func (client JobClient) List(ctx context.Context, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CloudJobListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.List") + defer func() { + sc := -1 + if result.cjlr.Response.Response != nil { + sc = result.cjlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxResults, + Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil}, + {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("batch.JobClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, filter, selectParameter, expand, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.cjlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.JobClient", "List", resp, "Failure sending request") + return + } + + result.cjlr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client JobClient) ListPreparer(ctx context.Context, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + if maxResults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxResults) + } else { + queryParameters["maxresults"] = autorest.Encode("query", 1000) + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPath("/jobs"), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client JobClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client JobClient) ListResponder(resp *http.Response) (result CloudJobListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
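// Illustrative sketch (editorial, not part of the patch): listing jobs with a page size of 100 and walking
// every page via the *Complete iterator, so the paging in listNextResults below never has to be driven by
// hand. maxresults must stay within the 1..1000 range enforced by the validation shown above; the $filter
// expression is only an example of the OData syntax.
func countActiveJobsExample(ctx context.Context, jobClient batch.JobClient) (int, error) {
	maxResults := int32(100)
	iter, err := jobClient.ListComplete(ctx,
		"state eq 'active'", // $filter
		"id,state",          // $select
		"",                  // $expand
		&maxResults, nil, nil, nil, nil)
	if err != nil {
		return 0, err
	}
	n := 0
	for iter.NotDone() {
		n++ // iter.Value() returns the current CloudJob when its fields are needed
		if err := iter.NextWithContext(ctx); err != nil {
			return n, err
		}
	}
	return n, nil
}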
+func (client JobClient) listNextResults(ctx context.Context, lastResults CloudJobListResult) (result CloudJobListResult, err error) { + req, err := lastResults.cloudJobListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.JobClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "batch.JobClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client JobClient) ListComplete(ctx context.Context, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CloudJobListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, filter, selectParameter, expand, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + return +} + +// ListFromJobSchedule sends the list from job schedule request. +// Parameters: +// jobScheduleID - the ID of the job schedule from which you want to get a list of jobs. +// filter - an OData $filter clause. For more information on constructing this filter, see +// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. +// selectParameter - an OData $select clause. +// expand - an OData $expand clause. +// maxResults - the maximum number of items to return in the response. A maximum of 1000 jobs can be returned. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. 
+func (client JobClient) ListFromJobSchedule(ctx context.Context, jobScheduleID string, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CloudJobListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.ListFromJobSchedule") + defer func() { + sc := -1 + if result.cjlr.Response.Response != nil { + sc = result.cjlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxResults, + Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil}, + {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("batch.JobClient", "ListFromJobSchedule", err.Error()) + } + + result.fn = client.listFromJobScheduleNextResults + req, err := client.ListFromJobSchedulePreparer(ctx, jobScheduleID, filter, selectParameter, expand, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "ListFromJobSchedule", nil, "Failure preparing request") + return + } + + resp, err := client.ListFromJobScheduleSender(req) + if err != nil { + result.cjlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.JobClient", "ListFromJobSchedule", resp, "Failure sending request") + return + } + + result.cjlr, err = client.ListFromJobScheduleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "ListFromJobSchedule", resp, "Failure responding to request") + } + + return +} + +// ListFromJobSchedulePreparer prepares the ListFromJobSchedule request. 
+func (client JobClient) ListFromJobSchedulePreparer(ctx context.Context, jobScheduleID string, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobScheduleId": autorest.Encode("path", jobScheduleID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + if maxResults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxResults) + } else { + queryParameters["maxresults"] = autorest.Encode("query", 1000) + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobschedules/{jobScheduleId}/jobs", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListFromJobScheduleSender sends the ListFromJobSchedule request. The method will close the +// http.Response Body if it receives an error. +func (client JobClient) ListFromJobScheduleSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ListFromJobScheduleResponder handles the response to the ListFromJobSchedule request. The method always +// closes the http.Response Body. +func (client JobClient) ListFromJobScheduleResponder(resp *http.Response) (result CloudJobListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listFromJobScheduleNextResults retrieves the next set of results, if any. 
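// Illustrative sketch (editorial, not part of the patch): enumerating the jobs created under a particular
// job schedule. The shape mirrors the ListComplete sketch above, with the schedule ID as the extra leading
// argument; "my-schedule" is a placeholder.
func listScheduleJobsExample(ctx context.Context, jobClient batch.JobClient) error {
	iter, err := jobClient.ListFromJobScheduleComplete(ctx, "my-schedule", "", "", "", nil, nil, nil, nil, nil)
	if err != nil {
		return err
	}
	for iter.NotDone() {
		// Inspect iter.Value() here as needed.
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}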
+func (client JobClient) listFromJobScheduleNextResults(ctx context.Context, lastResults CloudJobListResult) (result CloudJobListResult, err error) { + req, err := lastResults.cloudJobListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.JobClient", "listFromJobScheduleNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListFromJobScheduleSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "batch.JobClient", "listFromJobScheduleNextResults", resp, "Failure sending next results request") + } + result, err = client.ListFromJobScheduleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "listFromJobScheduleNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListFromJobScheduleComplete enumerates all values, automatically crossing page boundaries as required. +func (client JobClient) ListFromJobScheduleComplete(ctx context.Context, jobScheduleID string, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CloudJobListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.ListFromJobSchedule") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListFromJobSchedule(ctx, jobScheduleID, filter, selectParameter, expand, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + return +} + +// ListPreparationAndReleaseTaskStatus this API returns the Job Preparation and Job Release task status on all compute +// nodes that have run the Job Preparation or Job Release task. This includes nodes which have since been removed from +// the pool. If this API is invoked on a job which has no Job Preparation or Job Release task, the Batch service +// returns HTTP status code 409 (Conflict) with an error code of JobPreparationTaskNotSpecified. +// Parameters: +// jobID - the ID of the job. +// filter - an OData $filter clause. For more information on constructing this filter, see +// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. +// selectParameter - an OData $select clause. +// maxResults - the maximum number of items to return in the response. A maximum of 1000 tasks can be returned. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. 
+func (client JobClient) ListPreparationAndReleaseTaskStatus(ctx context.Context, jobID string, filter string, selectParameter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CloudJobListPreparationAndReleaseTaskStatusResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.ListPreparationAndReleaseTaskStatus") + defer func() { + sc := -1 + if result.cjlpartsr.Response.Response != nil { + sc = result.cjlpartsr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxResults, + Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil}, + {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("batch.JobClient", "ListPreparationAndReleaseTaskStatus", err.Error()) + } + + result.fn = client.listPreparationAndReleaseTaskStatusNextResults + req, err := client.ListPreparationAndReleaseTaskStatusPreparer(ctx, jobID, filter, selectParameter, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "ListPreparationAndReleaseTaskStatus", nil, "Failure preparing request") + return + } + + resp, err := client.ListPreparationAndReleaseTaskStatusSender(req) + if err != nil { + result.cjlpartsr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.JobClient", "ListPreparationAndReleaseTaskStatus", resp, "Failure sending request") + return + } + + result.cjlpartsr, err = client.ListPreparationAndReleaseTaskStatusResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "ListPreparationAndReleaseTaskStatus", resp, "Failure responding to request") + } + + return +} + +// ListPreparationAndReleaseTaskStatusPreparer prepares the ListPreparationAndReleaseTaskStatus request. 
+func (client JobClient) ListPreparationAndReleaseTaskStatusPreparer(ctx context.Context, jobID string, filter string, selectParameter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobId": autorest.Encode("path", jobID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + if maxResults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxResults) + } else { + queryParameters["maxresults"] = autorest.Encode("query", 1000) + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobs/{jobId}/jobpreparationandreleasetaskstatus", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListPreparationAndReleaseTaskStatusSender sends the ListPreparationAndReleaseTaskStatus request. The method will close the +// http.Response Body if it receives an error. +func (client JobClient) ListPreparationAndReleaseTaskStatusSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ListPreparationAndReleaseTaskStatusResponder handles the response to the ListPreparationAndReleaseTaskStatus request. The method always +// closes the http.Response Body. +func (client JobClient) ListPreparationAndReleaseTaskStatusResponder(resp *http.Response) (result CloudJobListPreparationAndReleaseTaskStatusResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listPreparationAndReleaseTaskStatusNextResults retrieves the next set of results, if any. 
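// Illustrative sketch (editorial, not part of the patch): checking Job Preparation / Job Release task
// status across the nodes that ran them. Callers should expect the 409 JobPreparationTaskNotSpecified
// error described above when the job defines neither task; same assumed package/imports as the first sketch.
func listPrepReleaseStatusExample(ctx context.Context, jobClient batch.JobClient) error {
	iter, err := jobClient.ListPreparationAndReleaseTaskStatusComplete(ctx, "my-job", "", "", nil, nil, nil, nil, nil)
	if err != nil {
		return err
	}
	for iter.NotDone() {
		// iter.Value() carries the per-node preparation/release execution information.
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}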
+func (client JobClient) listPreparationAndReleaseTaskStatusNextResults(ctx context.Context, lastResults CloudJobListPreparationAndReleaseTaskStatusResult) (result CloudJobListPreparationAndReleaseTaskStatusResult, err error) { + req, err := lastResults.cloudJobListPreparationAndReleaseTaskStatusResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.JobClient", "listPreparationAndReleaseTaskStatusNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListPreparationAndReleaseTaskStatusSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "batch.JobClient", "listPreparationAndReleaseTaskStatusNextResults", resp, "Failure sending next results request") + } + result, err = client.ListPreparationAndReleaseTaskStatusResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "listPreparationAndReleaseTaskStatusNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListPreparationAndReleaseTaskStatusComplete enumerates all values, automatically crossing page boundaries as required. +func (client JobClient) ListPreparationAndReleaseTaskStatusComplete(ctx context.Context, jobID string, filter string, selectParameter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CloudJobListPreparationAndReleaseTaskStatusResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.ListPreparationAndReleaseTaskStatus") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListPreparationAndReleaseTaskStatus(ctx, jobID, filter, selectParameter, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + return +} + +// Patch this replaces only the job properties specified in the request. For example, if the job has constraints, and a +// request does not specify the constraints element, then the job keeps the existing constraints. +// Parameters: +// jobID - the ID of the job whose properties you want to update. +// jobPatchParameter - the parameters for the request. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. 
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. +func (client JobClient) Patch(ctx context.Context, jobID string, jobPatchParameter JobPatchParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.Patch") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.PatchPreparer(ctx, jobID, jobPatchParameter, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "Patch", nil, "Failure preparing request") + return + } + + resp, err := client.PatchSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.JobClient", "Patch", resp, "Failure sending request") + return + } + + result, err = client.PatchResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "Patch", resp, "Failure responding to request") + } + + return +} + +// PatchPreparer prepares the Patch request. +func (client JobClient) PatchPreparer(ctx context.Context, jobID string, jobPatchParameter JobPatchParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobId": autorest.Encode("path", jobID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"), + autorest.AsPatch(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobs/{jobId}", pathParameters), + autorest.WithJSON(jobPatchParameter), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if 
len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// PatchSender sends the Patch request. The method will close the +// http.Response Body if it receives an error. +func (client JobClient) PatchSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// PatchResponder handles the response to the Patch request. The method always +// closes the http.Response Body. +func (client JobClient) PatchResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Terminate when a Terminate Job request is received, the Batch service sets the job to the terminating state. The +// Batch service then terminates any running tasks associated with the job and runs any required job release tasks. +// Then the job moves into the completed state. If there are any tasks in the job in the active state, they will remain +// in the active state. Once a job is terminated, new tasks cannot be added and any remaining active tasks will not be +// scheduled. +// Parameters: +// jobID - the ID of the job to terminate. +// jobTerminateParameter - the parameters for the request. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. 
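A minimal calling sketch for Terminate, assuming the generated NewJobClient constructor (analogous to NewJobScheduleClient defined later in this patch) and a hypothetical account URL and job ID; the optional JobTerminateParameter body, header values, and ETag/timestamp preconditions are all left at their zero values, and credential setup is omitted:

    package main

    import (
        "context"
        "fmt"

        "github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
    )

    func main() {
        // Hypothetical Batch account URL; NewJobClient follows the same pattern as NewJobScheduleClient.
        client := batch.NewJobClient("https://myaccount.myregion.batch.azure.com")
        // client.Authorizer = ... // supply Azure AD or shared-key credentials here

        // Passing nil for *JobTerminateParameter omits the optional terminate reason;
        // the empty strings and trailing nils skip the If-Match/If-None-Match/If-(Un)Modified-Since conditions.
        resp, err := client.Terminate(context.Background(), "my-job", nil, nil, nil, nil, nil, "", "", nil, nil)
        if err != nil {
            fmt.Println("terminate failed:", err)
            return
        }
        fmt.Println("terminate status:", resp.StatusCode) // the responder accepts 200 and 202
    }

The same nil/zero-value calling pattern applies to the other Job and JobSchedule operations below.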
+func (client JobClient) Terminate(ctx context.Context, jobID string, jobTerminateParameter *JobTerminateParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.Terminate") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.TerminatePreparer(ctx, jobID, jobTerminateParameter, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "Terminate", nil, "Failure preparing request") + return + } + + resp, err := client.TerminateSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.JobClient", "Terminate", resp, "Failure sending request") + return + } + + result, err = client.TerminateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "Terminate", resp, "Failure responding to request") + } + + return +} + +// TerminatePreparer prepares the Terminate request. +func (client JobClient) TerminatePreparer(ctx context.Context, jobID string, jobTerminateParameter *JobTerminateParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobId": autorest.Encode("path", jobID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobs/{jobId}/terminate", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if jobTerminateParameter != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(jobTerminateParameter)) + } + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + if 
ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// TerminateSender sends the Terminate request. The method will close the +// http.Response Body if it receives an error. +func (client JobClient) TerminateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// TerminateResponder handles the response to the Terminate request. The method always +// closes the http.Response Body. +func (client JobClient) TerminateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Update this fully replaces all the updatable properties of the job. For example, if the job has constraints +// associated with it and if constraints is not specified with this request, then the Batch service will remove the +// existing constraints. +// Parameters: +// jobID - the ID of the job whose properties you want to update. +// jobUpdateParameter - the parameters for the request. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. 
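A short Update sketch, reusing the client and imports from the Terminate sketch above; JobUpdateParameter must carry the required PoolInfo (the PoolID field name is assumed from the Batch REST schema), and, unlike Patch, any updatable property not supplied in the request is reset by the service:

    // to.StringPtr is from github.com/Azure/go-autorest/autorest/to.
    update := batch.JobUpdateParameter{
        PoolInfo: &batch.PoolInformation{PoolID: to.StringPtr("my-pool")}, // required by the client-side validation above
    }
    resp, err := client.Update(context.Background(), "my-job", update, nil, nil, nil, nil, "", "", nil, nil)
    if err == nil {
        fmt.Println("update status:", resp.StatusCode) // the responder accepts 200
    }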
+func (client JobClient) Update(ctx context.Context, jobID string, jobUpdateParameter JobUpdateParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.Update") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: jobUpdateParameter, + Constraints: []validation.Constraint{{Target: "jobUpdateParameter.PoolInfo", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.VMSize", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.CloudServiceConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.CloudServiceConfiguration.OsFamily", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.ImageReference", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.NodeAgentSKUID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.ContainerConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.ContainerConfiguration.Type", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + {Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.NetworkConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.NetworkConfiguration.EndpointConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.NetworkConfiguration.EndpointConfiguration.InboundNATPools", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + {Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.StartTask", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.StartTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: 
"jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.Registry", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}, + }}, + }}, + }}}}}); err != nil { + return result, validation.NewError("batch.JobClient", "Update", err.Error()) + } + + req, err := client.UpdatePreparer(ctx, jobID, jobUpdateParameter, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.JobClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client JobClient) UpdatePreparer(ctx context.Context, jobID string, jobUpdateParameter JobUpdateParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobId": autorest.Encode("path", jobID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"), + autorest.AsPut(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobs/{jobId}", pathParameters), + autorest.WithJSON(jobUpdateParameter), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + 
autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client JobClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client JobClient) UpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/services/batch/2019-06-01.9.0/batch/jobschedule.go b/services/batch/2019-06-01.9.0/batch/jobschedule.go new file mode 100644 index 000000000000..b8f71bdf07f5 --- /dev/null +++ b/services/batch/2019-06-01.9.0/batch/jobschedule.go @@ -0,0 +1,1521 @@ +package batch + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "github.com/satori/go.uuid" + "net/http" +) + +// JobScheduleClient is the a client for issuing REST requests to the Azure Batch service. +type JobScheduleClient struct { + BaseClient +} + +// NewJobScheduleClient creates an instance of the JobScheduleClient client. +func NewJobScheduleClient(batchURL string) JobScheduleClient { + return JobScheduleClient{New(batchURL)} +} + +// Add sends the add request. +// Parameters: +// cloudJobSchedule - the job schedule to be added. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. 
+func (client JobScheduleClient) Add(ctx context.Context, cloudJobSchedule JobScheduleAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobScheduleClient.Add") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: cloudJobSchedule, + Constraints: []validation.Constraint{{Target: "cloudJobSchedule.ID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "cloudJobSchedule.Schedule", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "cloudJobSchedule.JobSpecification", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.NetworkConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.NetworkConfiguration.SubnetID", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "cloudJobSchedule.JobSpecification.JobManagerTask", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.JobManagerTask.ID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "cloudJobSchedule.JobSpecification.JobManagerTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "cloudJobSchedule.JobSpecification.JobManagerTask.ContainerSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.JobManagerTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "cloudJobSchedule.JobSpecification.JobManagerTask.ContainerSettings.Registry", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.JobManagerTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "cloudJobSchedule.JobSpecification.JobManagerTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}, + {Target: "cloudJobSchedule.JobSpecification.JobPreparationTask", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.JobPreparationTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "cloudJobSchedule.JobSpecification.JobPreparationTask.ContainerSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.JobPreparationTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "cloudJobSchedule.JobSpecification.JobPreparationTask.ContainerSettings.Registry", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.JobPreparationTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "cloudJobSchedule.JobSpecification.JobPreparationTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}, + {Target: "cloudJobSchedule.JobSpecification.JobReleaseTask", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.JobReleaseTask.CommandLine", Name: validation.Null, Rule: true, Chain: 
nil}, + {Target: "cloudJobSchedule.JobSpecification.JobReleaseTask.ContainerSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.JobReleaseTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "cloudJobSchedule.JobSpecification.JobReleaseTask.ContainerSettings.Registry", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.JobReleaseTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "cloudJobSchedule.JobSpecification.JobReleaseTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}, + {Target: "cloudJobSchedule.JobSpecification.PoolInfo", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.VMSize", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.CloudServiceConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.CloudServiceConfiguration.OsFamily", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.ImageReference", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.NodeAgentSKUID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.ContainerConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.ContainerConfiguration.Type", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + {Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.NetworkConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.NetworkConfiguration.EndpointConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.NetworkConfiguration.EndpointConfiguration.InboundNATPools", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + {Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil}, + {Target: 
"cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.Registry", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}, + }}, + }}, + }}, + }}}}}); err != nil { + return result, validation.NewError("batch.JobScheduleClient", "Add", err.Error()) + } + + req, err := client.AddPreparer(ctx, cloudJobSchedule, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Add", nil, "Failure preparing request") + return + } + + resp, err := client.AddSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Add", resp, "Failure sending request") + return + } + + result, err = client.AddResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Add", resp, "Failure responding to request") + } + + return +} + +// AddPreparer prepares the Add request. +func (client JobScheduleClient) AddPreparer(ctx context.Context, cloudJobSchedule JobScheduleAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPath("/jobschedules"), + autorest.WithJSON(cloudJobSchedule), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// AddSender sends the Add request. The method will close the +// http.Response Body if it receives an error. 
+func (client JobScheduleClient) AddSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// AddResponder handles the response to the Add request. The method always +// closes the http.Response Body. +func (client JobScheduleClient) AddResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete when you delete a job schedule, this also deletes all jobs and tasks under that schedule. When tasks are +// deleted, all the files in their working directories on the compute nodes are also deleted (the retention period is +// ignored). The job schedule statistics are no longer accessible once the job schedule is deleted, though they are +// still counted towards account lifetime statistics. +// Parameters: +// jobScheduleID - the ID of the job schedule to delete. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. 
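A Delete sketch under the same assumptions as the Add sketch above; the trailing empty strings and nils skip the optional ETag and timestamp preconditions:

    resp, err := jsClient.Delete(context.Background(), "my-schedule", nil, nil, nil, nil, "", "", nil, nil)
    if err == nil {
        fmt.Println("delete status:", resp.StatusCode) // the responder accepts 200 and 202
    }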
+func (client JobScheduleClient) Delete(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobScheduleClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, jobScheduleID, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client JobScheduleClient) DeletePreparer(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobScheduleId": autorest.Encode("path", jobScheduleID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobschedules/{jobScheduleId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + 
autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client JobScheduleClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client JobScheduleClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Disable no new jobs will be created until the job schedule is enabled again. +// Parameters: +// jobScheduleID - the ID of the job schedule to disable. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. 
+func (client JobScheduleClient) Disable(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobScheduleClient.Disable") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DisablePreparer(ctx, jobScheduleID, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Disable", nil, "Failure preparing request") + return + } + + resp, err := client.DisableSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Disable", resp, "Failure sending request") + return + } + + result, err = client.DisableResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Disable", resp, "Failure responding to request") + } + + return +} + +// DisablePreparer prepares the Disable request. +func (client JobScheduleClient) DisablePreparer(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobScheduleId": autorest.Encode("path", jobScheduleID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobschedules/{jobScheduleId}/disable", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + 
autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DisableSender sends the Disable request. The method will close the +// http.Response Body if it receives an error. +func (client JobScheduleClient) DisableSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// DisableResponder handles the response to the Disable request. The method always +// closes the http.Response Body. +func (client JobScheduleClient) DisableResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Enable sends the enable request. +// Parameters: +// jobScheduleID - the ID of the job schedule to enable. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. 
+func (client JobScheduleClient) Enable(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobScheduleClient.Enable") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.EnablePreparer(ctx, jobScheduleID, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Enable", nil, "Failure preparing request") + return + } + + resp, err := client.EnableSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Enable", resp, "Failure sending request") + return + } + + result, err = client.EnableResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Enable", resp, "Failure responding to request") + } + + return +} + +// EnablePreparer prepares the Enable request. +func (client JobScheduleClient) EnablePreparer(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobScheduleId": autorest.Encode("path", jobScheduleID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobschedules/{jobScheduleId}/enable", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + 
autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// EnableSender sends the Enable request. The method will close the +// http.Response Body if it receives an error. +func (client JobScheduleClient) EnableSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// EnableResponder handles the response to the Enable request. The method always +// closes the http.Response Body. +func (client JobScheduleClient) EnableResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Exists sends the exists request. +// Parameters: +// jobScheduleID - the ID of the job schedule which you want to check. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. 
+func (client JobScheduleClient) Exists(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobScheduleClient.Exists") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ExistsPreparer(ctx, jobScheduleID, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Exists", nil, "Failure preparing request") + return + } + + resp, err := client.ExistsSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Exists", resp, "Failure sending request") + return + } + + result, err = client.ExistsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Exists", resp, "Failure responding to request") + } + + return +} + +// ExistsPreparer prepares the Exists request. +func (client JobScheduleClient) ExistsPreparer(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobScheduleId": autorest.Encode("path", jobScheduleID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsHead(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobschedules/{jobScheduleId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + 
autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ExistsSender sends the Exists request. The method will close the +// http.Response Body if it receives an error. +func (client JobScheduleClient) ExistsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ExistsResponder handles the response to the Exists request. The method always +// closes the http.Response Body. +func (client JobScheduleClient) ExistsResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets information about the specified job schedule. +// Parameters: +// jobScheduleID - the ID of the job schedule to get. +// selectParameter - an OData $select clause. +// expand - an OData $expand clause. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. 
+func (client JobScheduleClient) Get(ctx context.Context, jobScheduleID string, selectParameter string, expand string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result CloudJobSchedule, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobScheduleClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, jobScheduleID, selectParameter, expand, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client JobScheduleClient) GetPreparer(ctx context.Context, jobScheduleID string, selectParameter string, expand string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobScheduleId": autorest.Encode("path", jobScheduleID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobschedules/{jobScheduleId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", 
autorest.String(ifNoneMatch))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client JobScheduleClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client JobScheduleClient) GetResponder(resp *http.Response) (result CloudJobSchedule, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List sends the list request. +// Parameters: +// filter - an OData $filter clause. For more information on constructing this filter, see +// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. +// selectParameter - an OData $select clause. +// expand - an OData $expand clause. +// maxResults - the maximum number of items to return in the response. A maximum of 1000 job schedules can be +// returned. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. 
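Reviewer note (not part of the generated patch): a paging sketch for List, reusing the client and ctx from the Get sketch above; the OData filter string is an illustrative assumption. ListComplete, further down, offers the same enumeration through an iterator that crosses page boundaries automatically.

// Assumes the imports, client and ctx from the Get sketch above.
func printActiveSchedules(ctx context.Context, client batch.JobScheduleClient) error {
	// "state eq 'active'" is an illustrative OData filter.
	page, err := client.List(ctx, "state eq 'active'", "", "", nil, nil, nil, nil, nil)
	if err != nil {
		return err
	}
	for page.NotDone() {
		for _, cjs := range page.Values() {
			if cjs.ID != nil {
				fmt.Println(*cjs.ID)
			}
		}
		if err := page.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}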
+func (client JobScheduleClient) List(ctx context.Context, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CloudJobScheduleListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobScheduleClient.List") + defer func() { + sc := -1 + if result.cjslr.Response.Response != nil { + sc = result.cjslr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxResults, + Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil}, + {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("batch.JobScheduleClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, filter, selectParameter, expand, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.cjslr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "List", resp, "Failure sending request") + return + } + + result.cjslr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client JobScheduleClient) ListPreparer(ctx context.Context, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + if maxResults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxResults) + } else { + queryParameters["maxresults"] = autorest.Encode("query", 1000) + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPath("/jobschedules"), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client JobScheduleClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client JobScheduleClient) ListResponder(resp *http.Response) (result CloudJobScheduleListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client JobScheduleClient) listNextResults(ctx context.Context, lastResults CloudJobScheduleListResult) (result CloudJobScheduleListResult, err error) { + req, err := lastResults.cloudJobScheduleListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.JobScheduleClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "batch.JobScheduleClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client JobScheduleClient) ListComplete(ctx context.Context, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CloudJobScheduleListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobScheduleClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, filter, selectParameter, expand, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + return +} + +// Patch this replaces only the job schedule properties specified in the request. For example, if the schedule property +// is not specified with this request, then the Batch service will keep the existing schedule. Changes to a job +// schedule only impact jobs created by the schedule after the update has taken place; currently running jobs are +// unaffected. +// Parameters: +// jobScheduleID - the ID of the job schedule to update. +// jobSchedulePatchParameter - the parameters for the request. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. 
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. +func (client JobScheduleClient) Patch(ctx context.Context, jobScheduleID string, jobSchedulePatchParameter JobSchedulePatchParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobScheduleClient.Patch") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.PatchPreparer(ctx, jobScheduleID, jobSchedulePatchParameter, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Patch", nil, "Failure preparing request") + return + } + + resp, err := client.PatchSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Patch", resp, "Failure sending request") + return + } + + result, err = client.PatchResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Patch", resp, "Failure responding to request") + } + + return +} + +// PatchPreparer prepares the Patch request. +func (client JobScheduleClient) PatchPreparer(ctx context.Context, jobScheduleID string, jobSchedulePatchParameter JobSchedulePatchParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobScheduleId": autorest.Encode("path", jobScheduleID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"), + autorest.AsPatch(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobschedules/{jobScheduleId}", pathParameters), + autorest.WithJSON(jobSchedulePatchParameter), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + 
autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// PatchSender sends the Patch request. The method will close the +// http.Response Body if it receives an error. +func (client JobScheduleClient) PatchSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// PatchResponder handles the response to the Patch request. The method always +// closes the http.Response Body. +func (client JobScheduleClient) PatchResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Terminate sends the terminate request. +// Parameters: +// jobScheduleID - the ID of the job schedule to terminates. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. 
+func (client JobScheduleClient) Terminate(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobScheduleClient.Terminate") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.TerminatePreparer(ctx, jobScheduleID, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Terminate", nil, "Failure preparing request") + return + } + + resp, err := client.TerminateSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Terminate", resp, "Failure sending request") + return + } + + result, err = client.TerminateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Terminate", resp, "Failure responding to request") + } + + return +} + +// TerminatePreparer prepares the Terminate request. +func (client JobScheduleClient) TerminatePreparer(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobScheduleId": autorest.Encode("path", jobScheduleID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobschedules/{jobScheduleId}/terminate", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = 
autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// TerminateSender sends the Terminate request. The method will close the +// http.Response Body if it receives an error. +func (client JobScheduleClient) TerminateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// TerminateResponder handles the response to the Terminate request. The method always +// closes the http.Response Body. +func (client JobScheduleClient) TerminateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Update this fully replaces all the updatable properties of the job schedule. For example, if the schedule property +// is not specified with this request, then the Batch service will remove the existing schedule. Changes to a job +// schedule only impact jobs created by the schedule after the update has taken place; currently running jobs are +// unaffected. +// Parameters: +// jobScheduleID - the ID of the job schedule to update. +// jobScheduleUpdateParameter - the parameters for the request. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. 
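Reviewer note (not part of the generated patch): because Update replaces the whole resource, the client-side validation in the method below requires Schedule and a JobSpecification with PoolInfo. The sketch that follows builds a minimal parameter set under those constraints; the pool ID is an illustrative assumption, and to.StringPtr comes from github.com/Azure/go-autorest/autorest/to.

// Assumes client and ctx from the Get sketch above, plus the "to" helper package noted in the lead-in.
params := batch.JobScheduleUpdateParameter{
	// Schedule and JobSpecification are both required; properties omitted here are reset by the service.
	Schedule: &batch.Schedule{},
	JobSpecification: &batch.JobSpecification{
		PoolInfo: &batch.PoolInformation{
			PoolID: to.StringPtr("existing-pool"), // illustrative pool ID
		},
	},
}
if _, err := client.Update(ctx, "nightly-schedule", params, nil, nil, nil, nil, "", "", nil, nil); err != nil {
	log.Fatal(err)
}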
+func (client JobScheduleClient) Update(ctx context.Context, jobScheduleID string, jobScheduleUpdateParameter JobScheduleUpdateParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobScheduleClient.Update") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: jobScheduleUpdateParameter, + Constraints: []validation.Constraint{{Target: "jobScheduleUpdateParameter.Schedule", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "jobScheduleUpdateParameter.JobSpecification", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.NetworkConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.NetworkConfiguration.SubnetID", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "jobScheduleUpdateParameter.JobSpecification.JobManagerTask", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.JobManagerTask.ID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "jobScheduleUpdateParameter.JobSpecification.JobManagerTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "jobScheduleUpdateParameter.JobSpecification.JobManagerTask.ContainerSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.JobManagerTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "jobScheduleUpdateParameter.JobSpecification.JobManagerTask.ContainerSettings.Registry", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.JobManagerTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "jobScheduleUpdateParameter.JobSpecification.JobManagerTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}, + {Target: "jobScheduleUpdateParameter.JobSpecification.JobPreparationTask", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.JobPreparationTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "jobScheduleUpdateParameter.JobSpecification.JobPreparationTask.ContainerSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.JobPreparationTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "jobScheduleUpdateParameter.JobSpecification.JobPreparationTask.ContainerSettings.Registry", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.JobPreparationTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "jobScheduleUpdateParameter.JobSpecification.JobPreparationTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil}, + 
}}, + }}, + }}, + {Target: "jobScheduleUpdateParameter.JobSpecification.JobReleaseTask", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.JobReleaseTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "jobScheduleUpdateParameter.JobSpecification.JobReleaseTask.ContainerSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.JobReleaseTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "jobScheduleUpdateParameter.JobSpecification.JobReleaseTask.ContainerSettings.Registry", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.JobReleaseTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "jobScheduleUpdateParameter.JobSpecification.JobReleaseTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}, + {Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.VMSize", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.CloudServiceConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.CloudServiceConfiguration.OsFamily", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.ImageReference", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.NodeAgentSKUID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.ContainerConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.ContainerConfiguration.Type", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + {Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.NetworkConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.NetworkConfiguration.EndpointConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: 
"jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.NetworkConfiguration.EndpointConfiguration.InboundNATPools", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + {Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.Registry", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}, + }}, + }}, + }}, + }}}}}); err != nil { + return result, validation.NewError("batch.JobScheduleClient", "Update", err.Error()) + } + + req, err := client.UpdatePreparer(ctx, jobScheduleID, jobScheduleUpdateParameter, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. 
+func (client JobScheduleClient) UpdatePreparer(ctx context.Context, jobScheduleID string, jobScheduleUpdateParameter JobScheduleUpdateParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobScheduleId": autorest.Encode("path", jobScheduleID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"), + autorest.AsPut(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobschedules/{jobScheduleId}", pathParameters), + autorest.WithJSON(jobScheduleUpdateParameter), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client JobScheduleClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client JobScheduleClient) UpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/services/batch/2019-06-01.9.0/batch/models.go b/services/batch/2019-06-01.9.0/batch/models.go new file mode 100644 index 000000000000..7c2321ef0b99 --- /dev/null +++ b/services/batch/2019-06-01.9.0/batch/models.go @@ -0,0 +1,4219 @@ +package batch + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/go-autorest/tracing" + "io" + "net/http" +) + +// The package's fully qualified name. +const fqdn = "github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch" + +// AccessScope enumerates the values for access scope. +type AccessScope string + +const ( + // Job Grants access to perform all operations on the job containing the task. + Job AccessScope = "job" +) + +// PossibleAccessScopeValues returns an array of possible values for the AccessScope const type. +func PossibleAccessScopeValues() []AccessScope { + return []AccessScope{Job} +} + +// AllocationState enumerates the values for allocation state. +type AllocationState string + +const ( + // Resizing The pool is resizing; that is, compute nodes are being added to or removed from the pool. + Resizing AllocationState = "resizing" + // Steady The pool is not resizing. There are no changes to the number of nodes in the pool in progress. A + // pool enters this state when it is created and when no operations are being performed on the pool to + // change the number of nodes. + Steady AllocationState = "steady" + // Stopping The pool was resizing, but the user has requested that the resize be stopped, but the stop + // request has not yet been completed. + Stopping AllocationState = "stopping" +) + +// PossibleAllocationStateValues returns an array of possible values for the AllocationState const type. +func PossibleAllocationStateValues() []AllocationState { + return []AllocationState{Resizing, Steady, Stopping} +} + +// AutoUserScope enumerates the values for auto user scope. +type AutoUserScope string + +const ( + // Pool Specifies that the task runs as the common auto user account which is created on every node in a + // pool. + Pool AutoUserScope = "pool" + // Task Specifies that the service should create a new user for the task. + Task AutoUserScope = "task" +) + +// PossibleAutoUserScopeValues returns an array of possible values for the AutoUserScope const type. 
+func PossibleAutoUserScopeValues() []AutoUserScope { + return []AutoUserScope{Pool, Task} +} + +// CachingType enumerates the values for caching type. +type CachingType string + +const ( + // None The caching mode for the disk is not enabled. + None CachingType = "none" + // ReadOnly The caching mode for the disk is read only. + ReadOnly CachingType = "readonly" + // ReadWrite The caching mode for the disk is read and write. + ReadWrite CachingType = "readwrite" +) + +// PossibleCachingTypeValues returns an array of possible values for the CachingType const type. +func PossibleCachingTypeValues() []CachingType { + return []CachingType{None, ReadOnly, ReadWrite} +} + +// CertificateFormat enumerates the values for certificate format. +type CertificateFormat string + +const ( + // Cer The certificate is a base64-encoded X.509 certificate. + Cer CertificateFormat = "cer" + // Pfx The certificate is a PFX (PKCS#12) formatted certificate or certificate chain. + Pfx CertificateFormat = "pfx" +) + +// PossibleCertificateFormatValues returns an array of possible values for the CertificateFormat const type. +func PossibleCertificateFormatValues() []CertificateFormat { + return []CertificateFormat{Cer, Pfx} +} + +// CertificateState enumerates the values for certificate state. +type CertificateState string + +const ( + // Active The certificate is available for use in pools. + Active CertificateState = "active" + // DeleteFailed The user requested that the certificate be deleted, but there are pools that still have + // references to the certificate, or it is still installed on one or more compute nodes. (The latter can + // occur if the certificate has been removed from the pool, but the node has not yet restarted. Nodes + // refresh their certificates only when they restart.) You may use the cancel certificate delete operation + // to cancel the delete, or the delete certificate operation to retry the delete. + DeleteFailed CertificateState = "deletefailed" + // Deleting The user has requested that the certificate be deleted, but the delete operation has not yet + // completed. You may not reference the certificate when creating or updating pools. + Deleting CertificateState = "deleting" +) + +// PossibleCertificateStateValues returns an array of possible values for the CertificateState const type. +func PossibleCertificateStateValues() []CertificateState { + return []CertificateState{Active, DeleteFailed, Deleting} +} + +// CertificateStoreLocation enumerates the values for certificate store location. +type CertificateStoreLocation string + +const ( + // CurrentUser Certificates should be installed to the CurrentUser certificate store. + CurrentUser CertificateStoreLocation = "currentuser" + // LocalMachine Certificates should be installed to the LocalMachine certificate store. + LocalMachine CertificateStoreLocation = "localmachine" +) + +// PossibleCertificateStoreLocationValues returns an array of possible values for the CertificateStoreLocation const type. +func PossibleCertificateStoreLocationValues() []CertificateStoreLocation { + return []CertificateStoreLocation{CurrentUser, LocalMachine} +} + +// CertificateVisibility enumerates the values for certificate visibility. +type CertificateVisibility string + +const ( + // CertificateVisibilityRemoteUser The certificate should be visible to the user accounts under which users + // remotely access the node. 
+ CertificateVisibilityRemoteUser CertificateVisibility = "remoteuser" + // CertificateVisibilityStartTask The certificate should be visible to the user account under which the + // start task is run. + CertificateVisibilityStartTask CertificateVisibility = "starttask" + // CertificateVisibilityTask The certificate should be visible to the user accounts under which job tasks + // are run. + CertificateVisibilityTask CertificateVisibility = "task" +) + +// PossibleCertificateVisibilityValues returns an array of possible values for the CertificateVisibility const type. +func PossibleCertificateVisibilityValues() []CertificateVisibility { + return []CertificateVisibility{CertificateVisibilityRemoteUser, CertificateVisibilityStartTask, CertificateVisibilityTask} +} + +// ComputeNodeDeallocationOption enumerates the values for compute node deallocation option. +type ComputeNodeDeallocationOption string + +const ( + // Requeue Terminate running task processes and requeue the tasks. The tasks will run again when a node is + // available. Remove nodes as soon as tasks have been terminated. + Requeue ComputeNodeDeallocationOption = "requeue" + // RetainedData Allow currently running tasks to complete, then wait for all task data retention periods to + // expire. Schedule no new tasks while waiting. Remove nodes when all task retention periods have expired. + RetainedData ComputeNodeDeallocationOption = "retaineddata" + // TaskCompletion Allow currently running tasks to complete. Schedule no new tasks while waiting. Remove + // nodes when all tasks have completed. + TaskCompletion ComputeNodeDeallocationOption = "taskcompletion" + // Terminate Terminate running tasks. The tasks will be completed with failureInfo indicating that they + // were terminated, and will not run again. Remove nodes as soon as tasks have been terminated. + Terminate ComputeNodeDeallocationOption = "terminate" +) + +// PossibleComputeNodeDeallocationOptionValues returns an array of possible values for the ComputeNodeDeallocationOption const type. +func PossibleComputeNodeDeallocationOptionValues() []ComputeNodeDeallocationOption { + return []ComputeNodeDeallocationOption{Requeue, RetainedData, TaskCompletion, Terminate} +} + +// ComputeNodeFillType enumerates the values for compute node fill type. +type ComputeNodeFillType string + +const ( + // Pack As many tasks as possible (maxTasksPerNode) should be assigned to each node in the pool before any + // tasks are assigned to the next node in the pool. + Pack ComputeNodeFillType = "pack" + // Spread Tasks should be assigned evenly across all nodes in the pool. + Spread ComputeNodeFillType = "spread" +) + +// PossibleComputeNodeFillTypeValues returns an array of possible values for the ComputeNodeFillType const type. +func PossibleComputeNodeFillTypeValues() []ComputeNodeFillType { + return []ComputeNodeFillType{Pack, Spread} +} + +// ComputeNodeRebootOption enumerates the values for compute node reboot option. +type ComputeNodeRebootOption string + +const ( + // ComputeNodeRebootOptionRequeue Terminate running task processes and requeue the tasks. The tasks will + // run again when a node is available. Restart the node as soon as tasks have been terminated. + ComputeNodeRebootOptionRequeue ComputeNodeRebootOption = "requeue" + // ComputeNodeRebootOptionRetainedData Allow currently running tasks to complete, then wait for all task + // data retention periods to expire. Schedule no new tasks while waiting. Restart the node when all task + // retention periods have expired. 
+ ComputeNodeRebootOptionRetainedData ComputeNodeRebootOption = "retaineddata" + // ComputeNodeRebootOptionTaskCompletion Allow currently running tasks to complete. Schedule no new tasks + // while waiting. Restart the node when all tasks have completed. + ComputeNodeRebootOptionTaskCompletion ComputeNodeRebootOption = "taskcompletion" + // ComputeNodeRebootOptionTerminate Terminate running tasks. The tasks will be completed with failureInfo + // indicating that they were terminated, and will not run again. Restart the node as soon as tasks have + // been terminated. + ComputeNodeRebootOptionTerminate ComputeNodeRebootOption = "terminate" +) + +// PossibleComputeNodeRebootOptionValues returns an array of possible values for the ComputeNodeRebootOption const type. +func PossibleComputeNodeRebootOptionValues() []ComputeNodeRebootOption { + return []ComputeNodeRebootOption{ComputeNodeRebootOptionRequeue, ComputeNodeRebootOptionRetainedData, ComputeNodeRebootOptionTaskCompletion, ComputeNodeRebootOptionTerminate} +} + +// ComputeNodeReimageOption enumerates the values for compute node reimage option. +type ComputeNodeReimageOption string + +const ( + // ComputeNodeReimageOptionRequeue Terminate running task processes and requeue the tasks. The tasks will + // run again when a node is available. Reimage the node as soon as tasks have been terminated. + ComputeNodeReimageOptionRequeue ComputeNodeReimageOption = "requeue" + // ComputeNodeReimageOptionRetainedData Allow currently running tasks to complete, then wait for all task + // data retention periods to expire. Schedule no new tasks while waiting. Reimage the node when all task + // retention periods have expired. + ComputeNodeReimageOptionRetainedData ComputeNodeReimageOption = "retaineddata" + // ComputeNodeReimageOptionTaskCompletion Allow currently running tasks to complete. Schedule no new tasks + // while waiting. Reimage the node when all tasks have completed. + ComputeNodeReimageOptionTaskCompletion ComputeNodeReimageOption = "taskcompletion" + // ComputeNodeReimageOptionTerminate Terminate running tasks. The tasks will be completed with failureInfo + // indicating that they were terminated, and will not run again. Reimage the node as soon as tasks have + // been terminated. + ComputeNodeReimageOptionTerminate ComputeNodeReimageOption = "terminate" +) + +// PossibleComputeNodeReimageOptionValues returns an array of possible values for the ComputeNodeReimageOption const type. +func PossibleComputeNodeReimageOptionValues() []ComputeNodeReimageOption { + return []ComputeNodeReimageOption{ComputeNodeReimageOptionRequeue, ComputeNodeReimageOptionRetainedData, ComputeNodeReimageOptionTaskCompletion, ComputeNodeReimageOptionTerminate} +} + +// ComputeNodeState enumerates the values for compute node state. +type ComputeNodeState string + +const ( + // Creating The Batch service has obtained the underlying virtual machine from Azure Compute, but it has + // not yet started to join the pool. + Creating ComputeNodeState = "creating" + // Idle The node is not currently running a task. + Idle ComputeNodeState = "idle" + // LeavingPool The node is leaving the pool, either because the user explicitly removed it or because the + // pool is resizing or autoscaling down. + LeavingPool ComputeNodeState = "leavingpool" + // Offline The node is not currently running a task, and scheduling of new tasks to the node is disabled. + Offline ComputeNodeState = "offline" + // Preempted The low-priority node has been preempted. 
Tasks which were running on the node when it was + // preempted will be rescheduled when another node becomes available. + Preempted ComputeNodeState = "preempted" + // Rebooting The node is rebooting. + Rebooting ComputeNodeState = "rebooting" + // Reimaging The node is reimaging. + Reimaging ComputeNodeState = "reimaging" + // Running The node is running one or more tasks (other than a start task). + Running ComputeNodeState = "running" + // Starting The Batch service is starting on the underlying virtual machine. + Starting ComputeNodeState = "starting" + // StartTaskFailed The start task has failed on the compute node (and exhausted all retries), and + // waitForSuccess is set. The node is not usable for running tasks. + StartTaskFailed ComputeNodeState = "starttaskfailed" + // Unknown The Batch service has lost contact with the node, and does not know its true state. + Unknown ComputeNodeState = "unknown" + // Unusable The node cannot be used for task execution due to errors. + Unusable ComputeNodeState = "unusable" + // WaitingForStartTask The start task has started running on the compute node, but waitForSuccess is set + // and the start task has not yet completed. + WaitingForStartTask ComputeNodeState = "waitingforstarttask" +) + +// PossibleComputeNodeStateValues returns an array of possible values for the ComputeNodeState const type. +func PossibleComputeNodeStateValues() []ComputeNodeState { + return []ComputeNodeState{Creating, Idle, LeavingPool, Offline, Preempted, Rebooting, Reimaging, Running, Starting, StartTaskFailed, Unknown, Unusable, WaitingForStartTask} +} + +// ContainerWorkingDirectory enumerates the values for container working directory. +type ContainerWorkingDirectory string + +const ( + // ContainerImageDefault Use the working directory defined in the container image. Beware that this + // directory will not contain the Resource Files downloaded by Batch. + ContainerImageDefault ContainerWorkingDirectory = "containerImageDefault" + // TaskWorkingDirectory Use the standard Batch service task working directory, which will contain the Task + // Resource Files populated by Batch. + TaskWorkingDirectory ContainerWorkingDirectory = "taskWorkingDirectory" +) + +// PossibleContainerWorkingDirectoryValues returns an array of possible values for the ContainerWorkingDirectory const type. +func PossibleContainerWorkingDirectoryValues() []ContainerWorkingDirectory { + return []ContainerWorkingDirectory{ContainerImageDefault, TaskWorkingDirectory} +} + +// DependencyAction enumerates the values for dependency action. +type DependencyAction string + +const ( + // Block Block the task's dependencies. + Block DependencyAction = "block" + // Satisfy Satisfy the task's dependencies. + Satisfy DependencyAction = "satisfy" +) + +// PossibleDependencyActionValues returns an array of possible values for the DependencyAction const type. +func PossibleDependencyActionValues() []DependencyAction { + return []DependencyAction{Block, Satisfy} +} + +// DisableComputeNodeSchedulingOption enumerates the values for disable compute node scheduling option. +type DisableComputeNodeSchedulingOption string + +const ( + // DisableComputeNodeSchedulingOptionRequeue Terminate running task processes and requeue the tasks. The + // tasks may run again on other compute nodes, or when task scheduling is re-enabled on this node. Enter + // offline state as soon as tasks have been terminated. 
+ DisableComputeNodeSchedulingOptionRequeue DisableComputeNodeSchedulingOption = "requeue" + // DisableComputeNodeSchedulingOptionTaskCompletion Allow currently running tasks to complete. Schedule no + // new tasks while waiting. Enter offline state when all tasks have completed. + DisableComputeNodeSchedulingOptionTaskCompletion DisableComputeNodeSchedulingOption = "taskcompletion" + // DisableComputeNodeSchedulingOptionTerminate Terminate running tasks. The tasks will be completed with + // failureInfo indicating that they were terminated, and will not run again. Enter offline state as soon as + // tasks have been terminated. + DisableComputeNodeSchedulingOptionTerminate DisableComputeNodeSchedulingOption = "terminate" +) + +// PossibleDisableComputeNodeSchedulingOptionValues returns an array of possible values for the DisableComputeNodeSchedulingOption const type. +func PossibleDisableComputeNodeSchedulingOptionValues() []DisableComputeNodeSchedulingOption { + return []DisableComputeNodeSchedulingOption{DisableComputeNodeSchedulingOptionRequeue, DisableComputeNodeSchedulingOptionTaskCompletion, DisableComputeNodeSchedulingOptionTerminate} +} + +// DisableJobOption enumerates the values for disable job option. +type DisableJobOption string + +const ( + // DisableJobOptionRequeue Terminate running tasks and requeue them. The tasks will run again when the job + // is enabled. + DisableJobOptionRequeue DisableJobOption = "requeue" + // DisableJobOptionTerminate Terminate running tasks. The tasks will be completed with failureInfo + // indicating that they were terminated, and will not run again. + DisableJobOptionTerminate DisableJobOption = "terminate" + // DisableJobOptionWait Allow currently running tasks to complete. + DisableJobOptionWait DisableJobOption = "wait" +) + +// PossibleDisableJobOptionValues returns an array of possible values for the DisableJobOption const type. +func PossibleDisableJobOptionValues() []DisableJobOption { + return []DisableJobOption{DisableJobOptionRequeue, DisableJobOptionTerminate, DisableJobOptionWait} +} + +// DynamicVNetAssignmentScope enumerates the values for dynamic v net assignment scope. +type DynamicVNetAssignmentScope string + +const ( + // DynamicVNetAssignmentScopeJob Dynamic VNet assignment is done per-job. + DynamicVNetAssignmentScopeJob DynamicVNetAssignmentScope = "job" + // DynamicVNetAssignmentScopeNone No dynamic VNet assignment is enabled. + DynamicVNetAssignmentScopeNone DynamicVNetAssignmentScope = "none" +) + +// PossibleDynamicVNetAssignmentScopeValues returns an array of possible values for the DynamicVNetAssignmentScope const type. +func PossibleDynamicVNetAssignmentScopeValues() []DynamicVNetAssignmentScope { + return []DynamicVNetAssignmentScope{DynamicVNetAssignmentScopeJob, DynamicVNetAssignmentScopeNone} +} + +// ElevationLevel enumerates the values for elevation level. +type ElevationLevel string + +const ( + // Admin The user is a user with elevated access and operates with full Administrator permissions. + Admin ElevationLevel = "admin" + // NonAdmin The user is a standard user without elevated access. + NonAdmin ElevationLevel = "nonadmin" +) + +// PossibleElevationLevelValues returns an array of possible values for the ElevationLevel const type. +func PossibleElevationLevelValues() []ElevationLevel { + return []ElevationLevel{Admin, NonAdmin} +} + +// ErrorCategory enumerates the values for error category. +type ErrorCategory string + +const ( + // ServerError The error is due to an internal server issue. 
+ ServerError ErrorCategory = "servererror" + // UserError The error is due to a user issue, such as misconfiguration. + UserError ErrorCategory = "usererror" +) + +// PossibleErrorCategoryValues returns an array of possible values for the ErrorCategory const type. +func PossibleErrorCategoryValues() []ErrorCategory { + return []ErrorCategory{ServerError, UserError} +} + +// InboundEndpointProtocol enumerates the values for inbound endpoint protocol. +type InboundEndpointProtocol string + +const ( + // TCP Use TCP for the endpoint. + TCP InboundEndpointProtocol = "tcp" + // UDP Use UDP for the endpoint. + UDP InboundEndpointProtocol = "udp" +) + +// PossibleInboundEndpointProtocolValues returns an array of possible values for the InboundEndpointProtocol const type. +func PossibleInboundEndpointProtocolValues() []InboundEndpointProtocol { + return []InboundEndpointProtocol{TCP, UDP} +} + +// JobAction enumerates the values for job action. +type JobAction string + +const ( + // JobActionDisable Disable the job. This is equivalent to calling the disable job API, with a disableTasks + // value of requeue. + JobActionDisable JobAction = "disable" + // JobActionNone Take no action. + JobActionNone JobAction = "none" + // JobActionTerminate Terminate the job. The terminateReason in the job's executionInfo is set to + // "TaskFailed". + JobActionTerminate JobAction = "terminate" +) + +// PossibleJobActionValues returns an array of possible values for the JobAction const type. +func PossibleJobActionValues() []JobAction { + return []JobAction{JobActionDisable, JobActionNone, JobActionTerminate} +} + +// JobPreparationTaskState enumerates the values for job preparation task state. +type JobPreparationTaskState string + +const ( + // JobPreparationTaskStateCompleted The task has exited with exit code 0, or the task has exhausted its + // retry limit, or the Batch service was unable to start the task due to task preparation errors (such as + // resource file download failures). + JobPreparationTaskStateCompleted JobPreparationTaskState = "completed" + // JobPreparationTaskStateRunning The task is currently running (including retrying). + JobPreparationTaskStateRunning JobPreparationTaskState = "running" +) + +// PossibleJobPreparationTaskStateValues returns an array of possible values for the JobPreparationTaskState const type. +func PossibleJobPreparationTaskStateValues() []JobPreparationTaskState { + return []JobPreparationTaskState{JobPreparationTaskStateCompleted, JobPreparationTaskStateRunning} +} + +// JobReleaseTaskState enumerates the values for job release task state. +type JobReleaseTaskState string + +const ( + // JobReleaseTaskStateCompleted The task has exited with exit code 0, or the task has exhausted its retry + // limit, or the Batch service was unable to start the task due to task preparation errors (such as + // resource file download failures). + JobReleaseTaskStateCompleted JobReleaseTaskState = "completed" + // JobReleaseTaskStateRunning The task is currently running (including retrying). + JobReleaseTaskStateRunning JobReleaseTaskState = "running" +) + +// PossibleJobReleaseTaskStateValues returns an array of possible values for the JobReleaseTaskState const type. +func PossibleJobReleaseTaskStateValues() []JobReleaseTaskState { + return []JobReleaseTaskState{JobReleaseTaskStateCompleted, JobReleaseTaskStateRunning} +} + +// JobScheduleState enumerates the values for job schedule state. 
+type JobScheduleState string
+
+const (
+ // JobScheduleStateActive The job schedule is active and will create jobs as per its schedule.
+ JobScheduleStateActive JobScheduleState = "active"
+ // JobScheduleStateCompleted The schedule has terminated, either by reaching its end time or by the user
+ // terminating it explicitly.
+ JobScheduleStateCompleted JobScheduleState = "completed"
+ // JobScheduleStateDeleting The user has requested that the schedule be deleted, but the delete operation
+ // is still in progress. The scheduler will not initiate any new jobs for this schedule, and will delete
+ // any existing jobs and tasks under the schedule, including any active job. The schedule will be deleted
+ // when all jobs and tasks under the schedule have been deleted.
+ JobScheduleStateDeleting JobScheduleState = "deleting"
+ // JobScheduleStateDisabled The user has disabled the schedule. The scheduler will not initiate any new
+ // jobs on this schedule, but any existing active job will continue to run.
+ JobScheduleStateDisabled JobScheduleState = "disabled"
+ // JobScheduleStateTerminating The schedule has no more work to do, or has been explicitly terminated by
+ // the user, but the termination operation is still in progress. The scheduler will not initiate any new
+ // jobs for this schedule, nor is any existing job active.
+ JobScheduleStateTerminating JobScheduleState = "terminating"
+)
+
+// PossibleJobScheduleStateValues returns an array of possible values for the JobScheduleState const type.
+func PossibleJobScheduleStateValues() []JobScheduleState {
+ return []JobScheduleState{JobScheduleStateActive, JobScheduleStateCompleted, JobScheduleStateDeleting, JobScheduleStateDisabled, JobScheduleStateTerminating}
+}
+
+// JobState enumerates the values for job state.
+type JobState string
+
+const (
+ // JobStateActive The job is available to have tasks scheduled.
+ JobStateActive JobState = "active"
+ // JobStateCompleted All tasks have terminated, and the system will not accept any more tasks or any
+ // further changes to the job.
+ JobStateCompleted JobState = "completed"
+ // JobStateDeleting A user has requested that the job be deleted, but the delete operation is still in
+ // progress (for example, because the system is still terminating running tasks).
+ JobStateDeleting JobState = "deleting"
+ // JobStateDisabled A user has disabled the job. No tasks are running, and no new tasks will be scheduled.
+ JobStateDisabled JobState = "disabled"
+ // JobStateDisabling A user has requested that the job be disabled, but the disable operation is still in
+ // progress (for example, waiting for tasks to terminate).
+ JobStateDisabling JobState = "disabling"
+ // JobStateEnabling A user has requested that the job be enabled, but the enable operation is still in
+ // progress.
+ JobStateEnabling JobState = "enabling"
+ // JobStateTerminating The job is about to complete, either because a Job Manager task has completed or
+ // because the user has terminated the job, but the terminate operation is still in progress (for example,
+ // because Job Release tasks are running).
+ JobStateTerminating JobState = "terminating"
+)
+
+// PossibleJobStateValues returns an array of possible values for the JobState const type.
+func PossibleJobStateValues() []JobState {
+ return []JobState{JobStateActive, JobStateCompleted, JobStateDeleting, JobStateDisabled, JobStateDisabling, JobStateEnabling, JobStateTerminating}
+}
+
+// LoginMode enumerates the values for login mode.
+type LoginMode string + +const ( + // Batch The LOGON32_LOGON_BATCH Win32 login mode. The batch login mode is recommended for long running + // parallel processes. + Batch LoginMode = "batch" + // Interactive The LOGON32_LOGON_INTERACTIVE Win32 login mode. UAC is enabled on Windows + // VirtualMachineConfiguration pools. If this option is used with an elevated user identity in a Windows + // VirtualMachineConfiguration pool, the user session will not be elevated unless the application executed + // by the task command line is configured to always require administrative privilege or to always require + // maximum privilege. + Interactive LoginMode = "interactive" +) + +// PossibleLoginModeValues returns an array of possible values for the LoginMode const type. +func PossibleLoginModeValues() []LoginMode { + return []LoginMode{Batch, Interactive} +} + +// NetworkSecurityGroupRuleAccess enumerates the values for network security group rule access. +type NetworkSecurityGroupRuleAccess string + +const ( + // Allow Allow access. + Allow NetworkSecurityGroupRuleAccess = "allow" + // Deny Deny access. + Deny NetworkSecurityGroupRuleAccess = "deny" +) + +// PossibleNetworkSecurityGroupRuleAccessValues returns an array of possible values for the NetworkSecurityGroupRuleAccess const type. +func PossibleNetworkSecurityGroupRuleAccessValues() []NetworkSecurityGroupRuleAccess { + return []NetworkSecurityGroupRuleAccess{Allow, Deny} +} + +// OnAllTasksComplete enumerates the values for on all tasks complete. +type OnAllTasksComplete string + +const ( + // NoAction Do nothing. The job remains active unless terminated or disabled by some other means. + NoAction OnAllTasksComplete = "noaction" + // TerminateJob Terminate the job. The job's terminateReason is set to 'AllTasksComplete'. + TerminateJob OnAllTasksComplete = "terminatejob" +) + +// PossibleOnAllTasksCompleteValues returns an array of possible values for the OnAllTasksComplete const type. +func PossibleOnAllTasksCompleteValues() []OnAllTasksComplete { + return []OnAllTasksComplete{NoAction, TerminateJob} +} + +// OnTaskFailure enumerates the values for on task failure. +type OnTaskFailure string + +const ( + // OnTaskFailureNoAction Do nothing. The job remains active unless terminated or disabled by some other + // means. + OnTaskFailureNoAction OnTaskFailure = "noaction" + // OnTaskFailurePerformExitOptionsJobAction Take the action associated with the task exit condition in the + // task's exitConditions collection. (This may still result in no action being taken, if that is what the + // task specifies.) + OnTaskFailurePerformExitOptionsJobAction OnTaskFailure = "performexitoptionsjobaction" +) + +// PossibleOnTaskFailureValues returns an array of possible values for the OnTaskFailure const type. +func PossibleOnTaskFailureValues() []OnTaskFailure { + return []OnTaskFailure{OnTaskFailureNoAction, OnTaskFailurePerformExitOptionsJobAction} +} + +// OSType enumerates the values for os type. +type OSType string + +const ( + // Linux The Linux operating system. + Linux OSType = "linux" + // Windows The Windows operating system. + Windows OSType = "windows" +) + +// PossibleOSTypeValues returns an array of possible values for the OSType const type. +func PossibleOSTypeValues() []OSType { + return []OSType{Linux, Windows} +} + +// OutputFileUploadCondition enumerates the values for output file upload condition. 
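+//
+// A minimal sketch of how a condition is typically chosen; wantLogsOnFailureOnly
+// is an illustrative caller-side flag, and the output file specification that
+// the chosen value feeds into is not shown here:
+//
+//    condition := OutputFileUploadConditionTaskCompletion
+//    if wantLogsOnFailureOnly {
+//        condition = OutputFileUploadConditionTaskFailure
+//    }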
+type OutputFileUploadCondition string + +const ( + // OutputFileUploadConditionTaskCompletion Upload the file(s) after the task process exits, no matter what + // the exit code was. + OutputFileUploadConditionTaskCompletion OutputFileUploadCondition = "taskcompletion" + // OutputFileUploadConditionTaskFailure Upload the file(s) only after the task process exits with a nonzero + // exit code. + OutputFileUploadConditionTaskFailure OutputFileUploadCondition = "taskfailure" + // OutputFileUploadConditionTaskSuccess Upload the file(s) only after the task process exits with an exit + // code of 0. + OutputFileUploadConditionTaskSuccess OutputFileUploadCondition = "tasksuccess" +) + +// PossibleOutputFileUploadConditionValues returns an array of possible values for the OutputFileUploadCondition const type. +func PossibleOutputFileUploadConditionValues() []OutputFileUploadCondition { + return []OutputFileUploadCondition{OutputFileUploadConditionTaskCompletion, OutputFileUploadConditionTaskFailure, OutputFileUploadConditionTaskSuccess} +} + +// PoolLifetimeOption enumerates the values for pool lifetime option. +type PoolLifetimeOption string + +const ( + // PoolLifetimeOptionJob The pool exists for the lifetime of the job to which it is dedicated. The Batch + // service creates the pool when it creates the job. If the 'job' option is applied to a job schedule, the + // Batch service creates a new auto pool for every job created on the schedule. + PoolLifetimeOptionJob PoolLifetimeOption = "job" + // PoolLifetimeOptionJobSchedule The pool exists for the lifetime of the job schedule. The Batch Service + // creates the pool when it creates the first job on the schedule. You may apply this option only to job + // schedules, not to jobs. + PoolLifetimeOptionJobSchedule PoolLifetimeOption = "jobschedule" +) + +// PossiblePoolLifetimeOptionValues returns an array of possible values for the PoolLifetimeOption const type. +func PossiblePoolLifetimeOptionValues() []PoolLifetimeOption { + return []PoolLifetimeOption{PoolLifetimeOptionJob, PoolLifetimeOptionJobSchedule} +} + +// PoolState enumerates the values for pool state. +type PoolState string + +const ( + // PoolStateActive The pool is available to run tasks subject to the availability of compute nodes. + PoolStateActive PoolState = "active" + // PoolStateDeleting The user has requested that the pool be deleted, but the delete operation has not yet + // completed. + PoolStateDeleting PoolState = "deleting" +) + +// PossiblePoolStateValues returns an array of possible values for the PoolState const type. +func PossiblePoolStateValues() []PoolState { + return []PoolState{PoolStateActive, PoolStateDeleting} +} + +// SchedulingState enumerates the values for scheduling state. +type SchedulingState string + +const ( + // Disabled No new tasks will be scheduled on the node. Tasks already running on the node may still run to + // completion. All nodes start with scheduling enabled. + Disabled SchedulingState = "disabled" + // Enabled Tasks can be scheduled on the node. + Enabled SchedulingState = "enabled" +) + +// PossibleSchedulingStateValues returns an array of possible values for the SchedulingState const type. +func PossibleSchedulingStateValues() []SchedulingState { + return []SchedulingState{Disabled, Enabled} +} + +// StartTaskState enumerates the values for start task state. 
+type StartTaskState string
+
+const (
+ // StartTaskStateCompleted The start task has exited with exit code 0, or the start task has failed and the
+ // retry limit has been reached, or the start task process did not run due to task preparation errors (such as
+ // resource file download failures).
+ StartTaskStateCompleted StartTaskState = "completed"
+ // StartTaskStateRunning The start task is currently running.
+ StartTaskStateRunning StartTaskState = "running"
+)
+
+// PossibleStartTaskStateValues returns an array of possible values for the StartTaskState const type.
+func PossibleStartTaskStateValues() []StartTaskState {
+ return []StartTaskState{StartTaskStateCompleted, StartTaskStateRunning}
+}
+
+// StorageAccountType enumerates the values for storage account type.
+type StorageAccountType string
+
+const (
+ // PremiumLRS The data disk should use premium locally redundant storage.
+ PremiumLRS StorageAccountType = "premium_lrs"
+ // StandardLRS The data disk should use standard locally redundant storage.
+ StandardLRS StorageAccountType = "standard_lrs"
+)
+
+// PossibleStorageAccountTypeValues returns an array of possible values for the StorageAccountType const type.
+func PossibleStorageAccountTypeValues() []StorageAccountType {
+ return []StorageAccountType{PremiumLRS, StandardLRS}
+}
+
+// SubtaskState enumerates the values for subtask state.
+type SubtaskState string
+
+const (
+ // SubtaskStateCompleted The task is no longer eligible to run, usually because the task has finished
+ // successfully, or the task has finished unsuccessfully and has exhausted its retry limit. A task is also
+ // marked as completed if an error occurred launching the task, or when the task has been terminated.
+ SubtaskStateCompleted SubtaskState = "completed"
+ // SubtaskStatePreparing The task has been assigned to a compute node, but is waiting for a required Job
+ // Preparation task to complete on the node. If the Job Preparation task succeeds, the task will move to
+ // running. If the Job Preparation task fails, the task will return to active and will be eligible to be
+ // assigned to a different node.
+ SubtaskStatePreparing SubtaskState = "preparing"
+ // SubtaskStateRunning The task is running on a compute node. This includes task-level preparation such as
+ // downloading resource files or deploying application packages specified on the task - it does not
+ // necessarily mean that the task command line has started executing.
+ SubtaskStateRunning SubtaskState = "running"
+)
+
+// PossibleSubtaskStateValues returns an array of possible values for the SubtaskState const type.
+func PossibleSubtaskStateValues() []SubtaskState {
+ return []SubtaskState{SubtaskStateCompleted, SubtaskStatePreparing, SubtaskStateRunning}
+}
+
+// TaskAddStatus enumerates the values for task add status.
+type TaskAddStatus string
+
+const (
+ // TaskAddStatusClientError The task failed to add due to a client error and should not be retried without
+ // modifying the request as appropriate.
+ TaskAddStatusClientError TaskAddStatus = "clienterror"
+ // TaskAddStatusServerError The task failed to add due to a server error and can be retried without
+ // modification.
+ TaskAddStatusServerError TaskAddStatus = "servererror"
+ // TaskAddStatusSuccess The task was added successfully.
+ TaskAddStatusSuccess TaskAddStatus = "success"
+)
+
+// PossibleTaskAddStatusValues returns an array of possible values for the TaskAddStatus const type.
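+//
+// A sketch of validating an externally supplied status string against the
+// generated set of values; raw is an illustrative caller-side variable and the
+// comparison is case-sensitive:
+//
+//    var status TaskAddStatus
+//    for _, v := range PossibleTaskAddStatusValues() {
+//        if string(v) == raw {
+//            status = v
+//        }
+//    }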
+func PossibleTaskAddStatusValues() []TaskAddStatus { + return []TaskAddStatus{TaskAddStatusClientError, TaskAddStatusServerError, TaskAddStatusSuccess} +} + +// TaskExecutionResult enumerates the values for task execution result. +type TaskExecutionResult string + +const ( + // Failure There was an error during processing of the task. The failure may have occurred before the task + // process was launched, while the task process was executing, or after the task process exited. + Failure TaskExecutionResult = "failure" + // Success The task ran successfully. + Success TaskExecutionResult = "success" +) + +// PossibleTaskExecutionResultValues returns an array of possible values for the TaskExecutionResult const type. +func PossibleTaskExecutionResultValues() []TaskExecutionResult { + return []TaskExecutionResult{Failure, Success} +} + +// TaskState enumerates the values for task state. +type TaskState string + +const ( + // TaskStateActive The task is queued and able to run, but is not currently assigned to a compute node. A + // task enters this state when it is created, when it is enabled after being disabled, or when it is + // awaiting a retry after a failed run. + TaskStateActive TaskState = "active" + // TaskStateCompleted The task is no longer eligible to run, usually because the task has finished + // successfully, or the task has finished unsuccessfully and has exhausted its retry limit. A task is also + // marked as completed if an error occurred launching the task, or when the task has been terminated. + TaskStateCompleted TaskState = "completed" + // TaskStatePreparing The task has been assigned to a compute node, but is waiting for a required Job + // Preparation task to complete on the node. If the Job Preparation task succeeds, the task will move to + // running. If the Job Preparation task fails, the task will return to active and will be eligible to be + // assigned to a different node. + TaskStatePreparing TaskState = "preparing" + // TaskStateRunning The task is running on a compute node. This includes task-level preparation such as + // downloading resource files or deploying application packages specified on the task - it does not + // necessarily mean that the task command line has started executing. + TaskStateRunning TaskState = "running" +) + +// PossibleTaskStateValues returns an array of possible values for the TaskState const type. +func PossibleTaskStateValues() []TaskState { + return []TaskState{TaskStateActive, TaskStateCompleted, TaskStatePreparing, TaskStateRunning} +} + +// VerificationType enumerates the values for verification type. +type VerificationType string + +const ( + // Unverified The associated node agent SKU should have binary compatibility with the Image, but specific + // functionality has not been verified. + Unverified VerificationType = "unverified" + // Verified The Image is guaranteed to be compatible with the associated node agent SKU and all Batch + // features have been confirmed to work as expected. + Verified VerificationType = "verified" +) + +// PossibleVerificationTypeValues returns an array of possible values for the VerificationType const type. +func PossibleVerificationTypeValues() []VerificationType { + return []VerificationType{Unverified, Verified} +} + +// AccountListSupportedImagesResult ... 
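+//
+// A minimal consumption sketch, assuming iter is an
+// AccountListSupportedImagesResultIterator obtained from the AccountClient
+// list operation defined elsewhere in this package and ctx is a
+// context.Context:
+//
+//    for iter.NotDone() {
+//        img := iter.Value() // ImageInformation for one supported image
+//        _ = img
+//        if err := iter.NextWithContext(ctx); err != nil {
+//            return err
+//        }
+//    }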
+type AccountListSupportedImagesResult struct { + autorest.Response `json:"-"` + Value *[]ImageInformation `json:"value,omitempty"` + OdataNextLink *string `json:"odata.nextLink,omitempty"` +} + +// AccountListSupportedImagesResultIterator provides access to a complete listing of ImageInformation +// values. +type AccountListSupportedImagesResultIterator struct { + i int + page AccountListSupportedImagesResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *AccountListSupportedImagesResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountListSupportedImagesResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *AccountListSupportedImagesResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter AccountListSupportedImagesResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter AccountListSupportedImagesResultIterator) Response() AccountListSupportedImagesResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter AccountListSupportedImagesResultIterator) Value() ImageInformation { + if !iter.page.NotDone() { + return ImageInformation{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the AccountListSupportedImagesResultIterator type. +func NewAccountListSupportedImagesResultIterator(page AccountListSupportedImagesResultPage) AccountListSupportedImagesResultIterator { + return AccountListSupportedImagesResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (alsir AccountListSupportedImagesResult) IsEmpty() bool { + return alsir.Value == nil || len(*alsir.Value) == 0 +} + +// accountListSupportedImagesResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (alsir AccountListSupportedImagesResult) accountListSupportedImagesResultPreparer(ctx context.Context) (*http.Request, error) { + if alsir.OdataNextLink == nil || len(to.String(alsir.OdataNextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(alsir.OdataNextLink))) +} + +// AccountListSupportedImagesResultPage contains a page of ImageInformation values. 
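+//
+// A page-at-a-time sketch, assuming page is an
+// AccountListSupportedImagesResultPage returned by the same list operation and
+// ctx is a context.Context:
+//
+//    for page.NotDone() {
+//        for _, img := range page.Values() {
+//            _ = img // process each ImageInformation
+//        }
+//        if err := page.NextWithContext(ctx); err != nil {
+//            return err
+//        }
+//    }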
+type AccountListSupportedImagesResultPage struct { + fn func(context.Context, AccountListSupportedImagesResult) (AccountListSupportedImagesResult, error) + alsir AccountListSupportedImagesResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *AccountListSupportedImagesResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountListSupportedImagesResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.alsir) + if err != nil { + return err + } + page.alsir = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *AccountListSupportedImagesResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page AccountListSupportedImagesResultPage) NotDone() bool { + return !page.alsir.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page AccountListSupportedImagesResultPage) Response() AccountListSupportedImagesResult { + return page.alsir +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page AccountListSupportedImagesResultPage) Values() []ImageInformation { + if page.alsir.IsEmpty() { + return nil + } + return *page.alsir.Value +} + +// Creates a new instance of the AccountListSupportedImagesResultPage type. +func NewAccountListSupportedImagesResultPage(getNextPage func(context.Context, AccountListSupportedImagesResult) (AccountListSupportedImagesResult, error)) AccountListSupportedImagesResultPage { + return AccountListSupportedImagesResultPage{fn: getNextPage} +} + +// AffinityInformation ... +type AffinityInformation struct { + // AffinityID - You can pass the affinityId of a compute node to indicate that this task needs to run on that compute node. Note that this is just a soft affinity. If the target node is busy or unavailable at the time the task is scheduled, then the task will be scheduled elsewhere. + AffinityID *string `json:"affinityId,omitempty"` +} + +// ApplicationListResult ... +type ApplicationListResult struct { + autorest.Response `json:"-"` + Value *[]ApplicationSummary `json:"value,omitempty"` + OdataNextLink *string `json:"odata.nextLink,omitempty"` +} + +// ApplicationListResultIterator provides access to a complete listing of ApplicationSummary values. +type ApplicationListResultIterator struct { + i int + page ApplicationListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
+func (iter *ApplicationListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ApplicationListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ApplicationListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ApplicationListResultIterator) Response() ApplicationListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ApplicationListResultIterator) Value() ApplicationSummary { + if !iter.page.NotDone() { + return ApplicationSummary{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ApplicationListResultIterator type. +func NewApplicationListResultIterator(page ApplicationListResultPage) ApplicationListResultIterator { + return ApplicationListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (alr ApplicationListResult) IsEmpty() bool { + return alr.Value == nil || len(*alr.Value) == 0 +} + +// applicationListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (alr ApplicationListResult) applicationListResultPreparer(ctx context.Context) (*http.Request, error) { + if alr.OdataNextLink == nil || len(to.String(alr.OdataNextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(alr.OdataNextLink))) +} + +// ApplicationListResultPage contains a page of ApplicationSummary values. +type ApplicationListResultPage struct { + fn func(context.Context, ApplicationListResult) (ApplicationListResult, error) + alr ApplicationListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ApplicationListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.alr) + if err != nil { + return err + } + page.alr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. 
+func (page *ApplicationListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ApplicationListResultPage) NotDone() bool { + return !page.alr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ApplicationListResultPage) Response() ApplicationListResult { + return page.alr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ApplicationListResultPage) Values() []ApplicationSummary { + if page.alr.IsEmpty() { + return nil + } + return *page.alr.Value +} + +// Creates a new instance of the ApplicationListResultPage type. +func NewApplicationListResultPage(getNextPage func(context.Context, ApplicationListResult) (ApplicationListResult, error)) ApplicationListResultPage { + return ApplicationListResultPage{fn: getNextPage} +} + +// ApplicationPackageReference ... +type ApplicationPackageReference struct { + ApplicationID *string `json:"applicationId,omitempty"` + // Version - If this is omitted on a pool, and no default version is specified for this application, the request fails with the error code InvalidApplicationPackageReferences and HTTP status code 409. If this is omitted on a task, and no default version is specified for this application, the task fails with a pre-processing error. + Version *string `json:"version,omitempty"` +} + +// ApplicationSummary ... +type ApplicationSummary struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + DisplayName *string `json:"displayName,omitempty"` + Versions *[]string `json:"versions,omitempty"` +} + +// AuthenticationTokenSettings ... +type AuthenticationTokenSettings struct { + // Access - The authentication token grants access to a limited set of Batch service operations. Currently the only supported value for the access property is 'job', which grants access to all operations related to the job which contains the task. + Access *[]AccessScope `json:"access,omitempty"` +} + +// AutoPoolSpecification ... +type AutoPoolSpecification struct { + // AutoPoolIDPrefix - The Batch service assigns each auto pool a unique identifier on creation. To distinguish between pools created for different purposes, you can specify this element to add a prefix to the ID that is assigned. The prefix can be up to 20 characters long. + AutoPoolIDPrefix *string `json:"autoPoolIdPrefix,omitempty"` + // PoolLifetimeOption - Possible values include: 'PoolLifetimeOptionJobSchedule', 'PoolLifetimeOptionJob' + PoolLifetimeOption PoolLifetimeOption `json:"poolLifetimeOption,omitempty"` + // KeepAlive - If false, the Batch service deletes the pool once its lifetime (as determined by the poolLifetimeOption setting) expires; that is, when the job or job schedule completes. If true, the Batch service does not delete the pool automatically. It is up to the user to delete auto pools created with this option. + KeepAlive *bool `json:"keepAlive,omitempty"` + Pool *PoolSpecification `json:"pool,omitempty"` +} + +// AutoScaleRun ... +type AutoScaleRun struct { + autorest.Response `json:"-"` + Timestamp *date.Time `json:"timestamp,omitempty"` + // Results - Each variable value is returned in the form $variable=value, and variables are separated by semicolons. + Results *string `json:"results,omitempty"` + Error *AutoScaleRunError `json:"error,omitempty"` +} + +// AutoScaleRunError ... 
+type AutoScaleRunError struct { + Code *string `json:"code,omitempty"` + Message *string `json:"message,omitempty"` + Values *[]NameValuePair `json:"values,omitempty"` +} + +// AutoUserSpecification ... +type AutoUserSpecification struct { + // Scope - The default value is task. Possible values include: 'Task', 'Pool' + Scope AutoUserScope `json:"scope,omitempty"` + // ElevationLevel - The default value is nonAdmin. Possible values include: 'NonAdmin', 'Admin' + ElevationLevel ElevationLevel `json:"elevationLevel,omitempty"` +} + +// Certificate a certificate that can be installed on compute nodes and can be used to authenticate +// operations on the machine. +type Certificate struct { + autorest.Response `json:"-"` + Thumbprint *string `json:"thumbprint,omitempty"` + ThumbprintAlgorithm *string `json:"thumbprintAlgorithm,omitempty"` + URL *string `json:"url,omitempty"` + // State - Possible values include: 'Active', 'Deleting', 'DeleteFailed' + State CertificateState `json:"state,omitempty"` + StateTransitionTime *date.Time `json:"stateTransitionTime,omitempty"` + // PreviousState - This property is not set if the certificate is in its initial active state. Possible values include: 'Active', 'Deleting', 'DeleteFailed' + PreviousState CertificateState `json:"previousState,omitempty"` + // PreviousStateTransitionTime - This property is not set if the certificate is in its initial Active state. + PreviousStateTransitionTime *date.Time `json:"previousStateTransitionTime,omitempty"` + PublicData *string `json:"publicData,omitempty"` + // DeleteCertificateError - This property is set only if the certificate is in the DeleteFailed state. + DeleteCertificateError *DeleteCertificateError `json:"deleteCertificateError,omitempty"` +} + +// CertificateAddParameter ... +type CertificateAddParameter struct { + Thumbprint *string `json:"thumbprint,omitempty"` + ThumbprintAlgorithm *string `json:"thumbprintAlgorithm,omitempty"` + Data *string `json:"data,omitempty"` + // CertificateFormat - Possible values include: 'Pfx', 'Cer' + CertificateFormat CertificateFormat `json:"certificateFormat,omitempty"` + // Password - This is required if the certificate format is pfx. It should be omitted if the certificate format is cer. + Password *string `json:"password,omitempty"` +} + +// CertificateListResult ... +type CertificateListResult struct { + autorest.Response `json:"-"` + Value *[]Certificate `json:"value,omitempty"` + OdataNextLink *string `json:"odata.nextLink,omitempty"` +} + +// CertificateListResultIterator provides access to a complete listing of Certificate values. +type CertificateListResultIterator struct { + i int + page CertificateListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *CertificateListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CertificateListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
+// Deprecated: Use NextWithContext() instead. +func (iter *CertificateListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter CertificateListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter CertificateListResultIterator) Response() CertificateListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter CertificateListResultIterator) Value() Certificate { + if !iter.page.NotDone() { + return Certificate{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the CertificateListResultIterator type. +func NewCertificateListResultIterator(page CertificateListResultPage) CertificateListResultIterator { + return CertificateListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (clr CertificateListResult) IsEmpty() bool { + return clr.Value == nil || len(*clr.Value) == 0 +} + +// certificateListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (clr CertificateListResult) certificateListResultPreparer(ctx context.Context) (*http.Request, error) { + if clr.OdataNextLink == nil || len(to.String(clr.OdataNextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(clr.OdataNextLink))) +} + +// CertificateListResultPage contains a page of Certificate values. +type CertificateListResultPage struct { + fn func(context.Context, CertificateListResult) (CertificateListResult, error) + clr CertificateListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *CertificateListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CertificateListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.clr) + if err != nil { + return err + } + page.clr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *CertificateListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page CertificateListResultPage) NotDone() bool { + return !page.clr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page CertificateListResultPage) Response() CertificateListResult { + return page.clr +} + +// Values returns the slice of values for the current page or nil if there are no values. 
+func (page CertificateListResultPage) Values() []Certificate {
+ if page.clr.IsEmpty() {
+ return nil
+ }
+ return *page.clr.Value
+}
+
+// Creates a new instance of the CertificateListResultPage type.
+func NewCertificateListResultPage(getNextPage func(context.Context, CertificateListResult) (CertificateListResult, error)) CertificateListResultPage {
+ return CertificateListResultPage{fn: getNextPage}
+}
+
+// CertificateReference ...
+type CertificateReference struct {
+ Thumbprint *string `json:"thumbprint,omitempty"`
+ ThumbprintAlgorithm *string `json:"thumbprintAlgorithm,omitempty"`
+ // StoreLocation - The default value is currentuser. This property is applicable only for pools configured with Windows nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows image reference). For Linux compute nodes, the certificates are stored in a directory inside the task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this location. For certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and certificates are placed in that directory. Possible values include: 'CurrentUser', 'LocalMachine'
+ StoreLocation CertificateStoreLocation `json:"storeLocation,omitempty"`
+ // StoreName - This property is applicable only for pools configured with Windows nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows image reference). Common store names include: My, Root, CA, Trust, Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but any custom store name can also be used. The default value is My.
+ StoreName *string `json:"storeName,omitempty"`
+ // Visibility - You can specify more than one visibility in this collection. The default is all accounts.
+ Visibility *[]CertificateVisibility `json:"visibility,omitempty"`
+}
+
+// CloudJob ...
+type CloudJob struct {
+ autorest.Response `json:"-"`
+ // ID - The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an account that differ only by case).
+ ID *string `json:"id,omitempty"`
+ DisplayName *string `json:"displayName,omitempty"`
+ UsesTaskDependencies *bool `json:"usesTaskDependencies,omitempty"`
+ URL *string `json:"url,omitempty"`
+ // ETag - This is an opaque string. You can use it to detect whether the job has changed between requests. In particular, you can pass the ETag when updating a job to specify that your changes should take effect only if nobody else has modified the job in the meantime.
+ ETag *string `json:"eTag,omitempty"`
+ // LastModified - This is the last time at which the job level data, such as the job state or priority, changed. It does not factor in task-level changes such as adding new tasks or tasks changing state.
+ LastModified *date.Time `json:"lastModified,omitempty"`
+ CreationTime *date.Time `json:"creationTime,omitempty"`
+ // State - Possible values include: 'JobStateActive', 'JobStateDisabling', 'JobStateDisabled', 'JobStateEnabling', 'JobStateTerminating', 'JobStateCompleted', 'JobStateDeleting'
+ State JobState `json:"state,omitempty"`
+ StateTransitionTime *date.Time `json:"stateTransitionTime,omitempty"`
+ // PreviousState - This property is not set if the job is in its initial Active state. Possible values include: 'JobStateActive', 'JobStateDisabling', 'JobStateDisabled', 'JobStateEnabling', 'JobStateTerminating', 'JobStateCompleted', 'JobStateDeleting'
+ PreviousState JobState `json:"previousState,omitempty"`
+ // PreviousStateTransitionTime - This property is not set if the job is in its initial Active state.
+ PreviousStateTransitionTime *date.Time `json:"previousStateTransitionTime,omitempty"`
+ // Priority - Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0.
+ Priority *int32 `json:"priority,omitempty"`
+ Constraints *JobConstraints `json:"constraints,omitempty"`
+ JobManagerTask *JobManagerTask `json:"jobManagerTask,omitempty"`
+ // JobPreparationTask - The Job Preparation task is a special task run on each node before any other task of the job.
+ JobPreparationTask *JobPreparationTask `json:"jobPreparationTask,omitempty"`
+ // JobReleaseTask - The Job Release task is a special task run at the end of the job on each node that has run any other task of the job.
+ JobReleaseTask *JobReleaseTask `json:"jobReleaseTask,omitempty"`
+ // CommonEnvironmentSettings - Individual tasks can override an environment setting specified here by specifying the same setting name with a different value.
+ CommonEnvironmentSettings *[]EnvironmentSetting `json:"commonEnvironmentSettings,omitempty"`
+ PoolInfo *PoolInformation `json:"poolInfo,omitempty"`
+ // OnAllTasksComplete - The default is noaction. Possible values include: 'NoAction', 'TerminateJob'
+ OnAllTasksComplete OnAllTasksComplete `json:"onAllTasksComplete,omitempty"`
+ // OnTaskFailure - A task is considered to have failed if it has a failureInfo. A failureInfo is set if the task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the task, for example due to a resource file download error. The default is noaction. Possible values include: 'OnTaskFailureNoAction', 'OnTaskFailurePerformExitOptionsJobAction'
+ OnTaskFailure OnTaskFailure `json:"onTaskFailure,omitempty"`
+ NetworkConfiguration *JobNetworkConfiguration `json:"networkConfiguration,omitempty"`
+ // Metadata - The Batch service does not assign any meaning to metadata; it is solely for the use of user code.
+ Metadata *[]MetadataItem `json:"metadata,omitempty"`
+ ExecutionInfo *JobExecutionInformation `json:"executionInfo,omitempty"`
+ // Stats - This property is populated only if the CloudJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes.
+ Stats *JobStatistics `json:"stats,omitempty"`
+}
+
+// CloudJobListPreparationAndReleaseTaskStatusResult ...
+type CloudJobListPreparationAndReleaseTaskStatusResult struct {
+ autorest.Response `json:"-"`
+ Value *[]JobPreparationAndReleaseTaskExecutionInformation `json:"value,omitempty"`
+ OdataNextLink *string `json:"odata.nextLink,omitempty"`
+}
+
+// CloudJobListPreparationAndReleaseTaskStatusResultIterator provides access to a complete listing of
+// JobPreparationAndReleaseTaskExecutionInformation values.
+type CloudJobListPreparationAndReleaseTaskStatusResultIterator struct {
+ i int
+ page CloudJobListPreparationAndReleaseTaskStatusResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *CloudJobListPreparationAndReleaseTaskStatusResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudJobListPreparationAndReleaseTaskStatusResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *CloudJobListPreparationAndReleaseTaskStatusResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter CloudJobListPreparationAndReleaseTaskStatusResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter CloudJobListPreparationAndReleaseTaskStatusResultIterator) Response() CloudJobListPreparationAndReleaseTaskStatusResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter CloudJobListPreparationAndReleaseTaskStatusResultIterator) Value() JobPreparationAndReleaseTaskExecutionInformation { + if !iter.page.NotDone() { + return JobPreparationAndReleaseTaskExecutionInformation{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the CloudJobListPreparationAndReleaseTaskStatusResultIterator type. +func NewCloudJobListPreparationAndReleaseTaskStatusResultIterator(page CloudJobListPreparationAndReleaseTaskStatusResultPage) CloudJobListPreparationAndReleaseTaskStatusResultIterator { + return CloudJobListPreparationAndReleaseTaskStatusResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (cjlpartsr CloudJobListPreparationAndReleaseTaskStatusResult) IsEmpty() bool { + return cjlpartsr.Value == nil || len(*cjlpartsr.Value) == 0 +} + +// cloudJobListPreparationAndReleaseTaskStatusResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (cjlpartsr CloudJobListPreparationAndReleaseTaskStatusResult) cloudJobListPreparationAndReleaseTaskStatusResultPreparer(ctx context.Context) (*http.Request, error) { + if cjlpartsr.OdataNextLink == nil || len(to.String(cjlpartsr.OdataNextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(cjlpartsr.OdataNextLink))) +} + +// CloudJobListPreparationAndReleaseTaskStatusResultPage contains a page of +// JobPreparationAndReleaseTaskExecutionInformation values. +type CloudJobListPreparationAndReleaseTaskStatusResultPage struct { + fn func(context.Context, CloudJobListPreparationAndReleaseTaskStatusResult) (CloudJobListPreparationAndReleaseTaskStatusResult, error) + cjlpartsr CloudJobListPreparationAndReleaseTaskStatusResult +} + +// NextWithContext advances to the next page of values. 
If there was an error making +// the request the page does not advance and the error is returned. +func (page *CloudJobListPreparationAndReleaseTaskStatusResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudJobListPreparationAndReleaseTaskStatusResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.cjlpartsr) + if err != nil { + return err + } + page.cjlpartsr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *CloudJobListPreparationAndReleaseTaskStatusResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page CloudJobListPreparationAndReleaseTaskStatusResultPage) NotDone() bool { + return !page.cjlpartsr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page CloudJobListPreparationAndReleaseTaskStatusResultPage) Response() CloudJobListPreparationAndReleaseTaskStatusResult { + return page.cjlpartsr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page CloudJobListPreparationAndReleaseTaskStatusResultPage) Values() []JobPreparationAndReleaseTaskExecutionInformation { + if page.cjlpartsr.IsEmpty() { + return nil + } + return *page.cjlpartsr.Value +} + +// Creates a new instance of the CloudJobListPreparationAndReleaseTaskStatusResultPage type. +func NewCloudJobListPreparationAndReleaseTaskStatusResultPage(getNextPage func(context.Context, CloudJobListPreparationAndReleaseTaskStatusResult) (CloudJobListPreparationAndReleaseTaskStatusResult, error)) CloudJobListPreparationAndReleaseTaskStatusResultPage { + return CloudJobListPreparationAndReleaseTaskStatusResultPage{fn: getNextPage} +} + +// CloudJobListResult ... +type CloudJobListResult struct { + autorest.Response `json:"-"` + Value *[]CloudJob `json:"value,omitempty"` + OdataNextLink *string `json:"odata.nextLink,omitempty"` +} + +// CloudJobListResultIterator provides access to a complete listing of CloudJob values. +type CloudJobListResultIterator struct { + i int + page CloudJobListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *CloudJobListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudJobListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. 
+func (iter *CloudJobListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter CloudJobListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter CloudJobListResultIterator) Response() CloudJobListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter CloudJobListResultIterator) Value() CloudJob { + if !iter.page.NotDone() { + return CloudJob{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the CloudJobListResultIterator type. +func NewCloudJobListResultIterator(page CloudJobListResultPage) CloudJobListResultIterator { + return CloudJobListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (cjlr CloudJobListResult) IsEmpty() bool { + return cjlr.Value == nil || len(*cjlr.Value) == 0 +} + +// cloudJobListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (cjlr CloudJobListResult) cloudJobListResultPreparer(ctx context.Context) (*http.Request, error) { + if cjlr.OdataNextLink == nil || len(to.String(cjlr.OdataNextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(cjlr.OdataNextLink))) +} + +// CloudJobListResultPage contains a page of CloudJob values. +type CloudJobListResultPage struct { + fn func(context.Context, CloudJobListResult) (CloudJobListResult, error) + cjlr CloudJobListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *CloudJobListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudJobListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.cjlr) + if err != nil { + return err + } + page.cjlr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *CloudJobListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page CloudJobListResultPage) NotDone() bool { + return !page.cjlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page CloudJobListResultPage) Response() CloudJobListResult { + return page.cjlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page CloudJobListResultPage) Values() []CloudJob { + if page.cjlr.IsEmpty() { + return nil + } + return *page.cjlr.Value +} + +// Creates a new instance of the CloudJobListResultPage type. 
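+// An illustrative usage sketch (not part of the generated code): draining a
+// CloudJobListResultPage page by page using only the methods defined above.
+// The page value and ctx are assumed to come from elsewhere (for example, a
+// List call on the Job client).
+//
+//    for page.NotDone() {
+//        for _, job := range page.Values() {
+//            _ = job // process the CloudJob value
+//        }
+//        if err := page.NextWithContext(ctx); err != nil {
+//            break // hypothetical error handling for this sketch
+//        }
+//    }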
+func NewCloudJobListResultPage(getNextPage func(context.Context, CloudJobListResult) (CloudJobListResult, error)) CloudJobListResultPage { + return CloudJobListResultPage{fn: getNextPage} +} + +// CloudJobSchedule ... +type CloudJobSchedule struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + DisplayName *string `json:"displayName,omitempty"` + URL *string `json:"url,omitempty"` + // ETag - This is an opaque string. You can use it to detect whether the job schedule has changed between requests. In particular, you can pass the ETag with an Update Job Schedule request to specify that your changes should take effect only if nobody else has modified the schedule in the meantime. + ETag *string `json:"eTag,omitempty"` + // LastModified - This is the last time at which the schedule level data, such as the job specification or recurrence information, changed. It does not factor in job-level changes such as new jobs being created or jobs changing state. + LastModified *date.Time `json:"lastModified,omitempty"` + CreationTime *date.Time `json:"creationTime,omitempty"` + // State - Possible values include: 'JobScheduleStateActive', 'JobScheduleStateCompleted', 'JobScheduleStateDisabled', 'JobScheduleStateTerminating', 'JobScheduleStateDeleting' + State JobScheduleState `json:"state,omitempty"` + StateTransitionTime *date.Time `json:"stateTransitionTime,omitempty"` + // PreviousState - This property is not present if the job schedule is in its initial active state. Possible values include: 'JobScheduleStateActive', 'JobScheduleStateCompleted', 'JobScheduleStateDisabled', 'JobScheduleStateTerminating', 'JobScheduleStateDeleting' + PreviousState JobScheduleState `json:"previousState,omitempty"` + // PreviousStateTransitionTime - This property is not present if the job schedule is in its initial active state. + PreviousStateTransitionTime *date.Time `json:"previousStateTransitionTime,omitempty"` + Schedule *Schedule `json:"schedule,omitempty"` + JobSpecification *JobSpecification `json:"jobSpecification,omitempty"` + ExecutionInfo *JobScheduleExecutionInformation `json:"executionInfo,omitempty"` + // Metadata - The Batch service does not assign any meaning to metadata; it is solely for the use of user code. + Metadata *[]MetadataItem `json:"metadata,omitempty"` + Stats *JobScheduleStatistics `json:"stats,omitempty"` +} + +// CloudJobScheduleListResult ... +type CloudJobScheduleListResult struct { + autorest.Response `json:"-"` + Value *[]CloudJobSchedule `json:"value,omitempty"` + OdataNextLink *string `json:"odata.nextLink,omitempty"` +} + +// CloudJobScheduleListResultIterator provides access to a complete listing of CloudJobSchedule values. +type CloudJobScheduleListResultIterator struct { + i int + page CloudJobScheduleListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned.
+func (iter *CloudJobScheduleListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudJobScheduleListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *CloudJobScheduleListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter CloudJobScheduleListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter CloudJobScheduleListResultIterator) Response() CloudJobScheduleListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter CloudJobScheduleListResultIterator) Value() CloudJobSchedule { + if !iter.page.NotDone() { + return CloudJobSchedule{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the CloudJobScheduleListResultIterator type. +func NewCloudJobScheduleListResultIterator(page CloudJobScheduleListResultPage) CloudJobScheduleListResultIterator { + return CloudJobScheduleListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (cjslr CloudJobScheduleListResult) IsEmpty() bool { + return cjslr.Value == nil || len(*cjslr.Value) == 0 +} + +// cloudJobScheduleListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (cjslr CloudJobScheduleListResult) cloudJobScheduleListResultPreparer(ctx context.Context) (*http.Request, error) { + if cjslr.OdataNextLink == nil || len(to.String(cjslr.OdataNextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(cjslr.OdataNextLink))) +} + +// CloudJobScheduleListResultPage contains a page of CloudJobSchedule values. +type CloudJobScheduleListResultPage struct { + fn func(context.Context, CloudJobScheduleListResult) (CloudJobScheduleListResult, error) + cjslr CloudJobScheduleListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *CloudJobScheduleListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudJobScheduleListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.cjslr) + if err != nil { + return err + } + page.cjslr = next + return nil +} + +// Next advances to the next page of values. 
If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *CloudJobScheduleListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page CloudJobScheduleListResultPage) NotDone() bool { + return !page.cjslr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page CloudJobScheduleListResultPage) Response() CloudJobScheduleListResult { + return page.cjslr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page CloudJobScheduleListResultPage) Values() []CloudJobSchedule { + if page.cjslr.IsEmpty() { + return nil + } + return *page.cjslr.Value +} + +// Creates a new instance of the CloudJobScheduleListResultPage type. +func NewCloudJobScheduleListResultPage(getNextPage func(context.Context, CloudJobScheduleListResult) (CloudJobScheduleListResult, error)) CloudJobScheduleListResultPage { + return CloudJobScheduleListResultPage{fn: getNextPage} +} + +// CloudPool ... +type CloudPool struct { + autorest.Response `json:"-"` + // ID - The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an account that differ only by case). + ID *string `json:"id,omitempty"` + // DisplayName - The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + DisplayName *string `json:"displayName,omitempty"` + URL *string `json:"url,omitempty"` + // ETag - This is an opaque string. You can use it to detect whether the pool has changed between requests. In particular, you can pass the ETag when updating a pool to specify that your changes should take effect only if nobody else has modified the pool in the meantime. + ETag *string `json:"eTag,omitempty"` + // LastModified - This is the last time at which the pool level data, such as the targetDedicatedNodes or enableAutoscale settings, changed. It does not factor in node-level changes such as a compute node changing state. + LastModified *date.Time `json:"lastModified,omitempty"` + CreationTime *date.Time `json:"creationTime,omitempty"` + // State - Possible values include: 'PoolStateActive', 'PoolStateDeleting' + State PoolState `json:"state,omitempty"` + StateTransitionTime *date.Time `json:"stateTransitionTime,omitempty"` + // AllocationState - Possible values include: 'Steady', 'Resizing', 'Stopping' + AllocationState AllocationState `json:"allocationState,omitempty"` + AllocationStateTransitionTime *date.Time `json:"allocationStateTransitionTime,omitempty"` + // VMSize - For information about available sizes of virtual machines in pools, see Choose a VM size for compute nodes in an Azure Batch pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + VMSize *string `json:"vmSize,omitempty"` + // CloudServiceConfiguration - This property and virtualMachineConfiguration are mutually exclusive and one of the properties must be specified. This property cannot be specified if the Batch account was created with its poolAllocationMode property set to 'UserSubscription'.
+ CloudServiceConfiguration *CloudServiceConfiguration `json:"cloudServiceConfiguration,omitempty"` + // VirtualMachineConfiguration - This property and cloudServiceConfiguration are mutually exclusive and one of the properties must be specified. + VirtualMachineConfiguration *VirtualMachineConfiguration `json:"virtualMachineConfiguration,omitempty"` + // ResizeTimeout - This is the timeout for the most recent resize operation. (The initial sizing when the pool is created counts as a resize.) The default value is 15 minutes. + ResizeTimeout *string `json:"resizeTimeout,omitempty"` + // ResizeErrors - This property is set only if one or more errors occurred during the last pool resize, and only when the pool allocationState is Steady. + ResizeErrors *[]ResizeError `json:"resizeErrors,omitempty"` + CurrentDedicatedNodes *int32 `json:"currentDedicatedNodes,omitempty"` + // CurrentLowPriorityNodes - Low-priority compute nodes which have been preempted are included in this count. + CurrentLowPriorityNodes *int32 `json:"currentLowPriorityNodes,omitempty"` + TargetDedicatedNodes *int32 `json:"targetDedicatedNodes,omitempty"` + TargetLowPriorityNodes *int32 `json:"targetLowPriorityNodes,omitempty"` + // EnableAutoScale - If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula property is required and the pool automatically resizes according to the formula. The default value is false. + EnableAutoScale *bool `json:"enableAutoScale,omitempty"` + // AutoScaleFormula - This property is set only if the pool automatically scales, i.e. enableAutoScale is true. + AutoScaleFormula *string `json:"autoScaleFormula,omitempty"` + // AutoScaleEvaluationInterval - This property is set only if the pool automatically scales, i.e. enableAutoScale is true. + AutoScaleEvaluationInterval *string `json:"autoScaleEvaluationInterval,omitempty"` + // AutoScaleRun - This property is set only if the pool automatically scales, i.e. enableAutoScale is true. + AutoScaleRun *AutoScaleRun `json:"autoScaleRun,omitempty"` + // EnableInterNodeCommunication - This imposes restrictions on which nodes can be assigned to the pool. Specifying this value can reduce the chance of the requested number of nodes being allocated in the pool. + EnableInterNodeCommunication *bool `json:"enableInterNodeCommunication,omitempty"` + NetworkConfiguration *NetworkConfiguration `json:"networkConfiguration,omitempty"` + StartTask *StartTask `json:"startTask,omitempty"` + // CertificateReferences - For Windows compute nodes, the Batch service installs the certificates to the specified certificate store and location. For Linux compute nodes, the certificates are stored in a directory inside the task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this location. For certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and certificates are placed in that directory. + CertificateReferences *[]CertificateReference `json:"certificateReferences,omitempty"` + // ApplicationPackageReferences - Changes to application package references affect all new compute nodes joining the pool, but do not affect compute nodes that are already in the pool until they are rebooted or reimaged. There is a maximum of 10 application package references on any given pool.
+ ApplicationPackageReferences *[]ApplicationPackageReference `json:"applicationPackageReferences,omitempty"` + // ApplicationLicenses - The list of application licenses must be a subset of available Batch service application licenses. If a license is requested which is not supported, pool creation will fail. + ApplicationLicenses *[]string `json:"applicationLicenses,omitempty"` + // MaxTasksPerNode - The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. + MaxTasksPerNode *int32 `json:"maxTasksPerNode,omitempty"` + TaskSchedulingPolicy *TaskSchedulingPolicy `json:"taskSchedulingPolicy,omitempty"` + UserAccounts *[]UserAccount `json:"userAccounts,omitempty"` + Metadata *[]MetadataItem `json:"metadata,omitempty"` + // Stats - This property is populated only if the CloudPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. + Stats *PoolStatistics `json:"stats,omitempty"` +} + +// CloudPoolListResult ... +type CloudPoolListResult struct { + autorest.Response `json:"-"` + Value *[]CloudPool `json:"value,omitempty"` + OdataNextLink *string `json:"odata.nextLink,omitempty"` +} + +// CloudPoolListResultIterator provides access to a complete listing of CloudPool values. +type CloudPoolListResultIterator struct { + i int + page CloudPoolListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *CloudPoolListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudPoolListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *CloudPoolListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter CloudPoolListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter CloudPoolListResultIterator) Response() CloudPoolListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter CloudPoolListResultIterator) Value() CloudPool { + if !iter.page.NotDone() { + return CloudPool{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the CloudPoolListResultIterator type. +func NewCloudPoolListResultIterator(page CloudPoolListResultPage) CloudPoolListResultIterator { + return CloudPoolListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. 
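+// An illustrative sketch (not part of the generated code): reading the scaling
+// fields documented on CloudPool above from a retrieved pool value. "pool" is
+// assumed to be a CloudPool obtained elsewhere (for example from the Pool
+// client's Get operation).
+//
+//    if pool.EnableAutoScale != nil && *pool.EnableAutoScale {
+//        _ = pool.AutoScaleFormula     // formula driving automatic resizes
+//    } else if pool.TargetDedicatedNodes != nil {
+//        _ = *pool.TargetDedicatedNodes // fixed dedicated-node count requested for the pool
+//    }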
+func (cplr CloudPoolListResult) IsEmpty() bool { + return cplr.Value == nil || len(*cplr.Value) == 0 +} + +// cloudPoolListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (cplr CloudPoolListResult) cloudPoolListResultPreparer(ctx context.Context) (*http.Request, error) { + if cplr.OdataNextLink == nil || len(to.String(cplr.OdataNextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(cplr.OdataNextLink))) +} + +// CloudPoolListResultPage contains a page of CloudPool values. +type CloudPoolListResultPage struct { + fn func(context.Context, CloudPoolListResult) (CloudPoolListResult, error) + cplr CloudPoolListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *CloudPoolListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudPoolListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.cplr) + if err != nil { + return err + } + page.cplr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *CloudPoolListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page CloudPoolListResultPage) NotDone() bool { + return !page.cplr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page CloudPoolListResultPage) Response() CloudPoolListResult { + return page.cplr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page CloudPoolListResultPage) Values() []CloudPool { + if page.cplr.IsEmpty() { + return nil + } + return *page.cplr.Value +} + +// Creates a new instance of the CloudPoolListResultPage type. +func NewCloudPoolListResultPage(getNextPage func(context.Context, CloudPoolListResult) (CloudPoolListResult, error)) CloudPoolListResultPage { + return CloudPoolListResultPage{fn: getNextPage} +} + +// CloudServiceConfiguration ... +type CloudServiceConfiguration struct { + // OsFamily - Possible values are: + // 2 - OS Family 2, equivalent to Windows Server 2008 R2 SP1. + // 3 - OS Family 3, equivalent to Windows Server 2012. + // 4 - OS Family 4, equivalent to Windows Server 2012 R2. + // 5 - OS Family 5, equivalent to Windows Server 2016. + // 6 - OS Family 6, equivalent to Windows Server 2019. For more information, see Azure Guest OS Releases (https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases). + OsFamily *string `json:"osFamily,omitempty"` + // OsVersion - The default value is * which specifies the latest operating system version for the specified OS family. + OsVersion *string `json:"osVersion,omitempty"` +} + +// CloudTask batch will retry tasks when a recovery operation is triggered on a compute node. 
Examples of +// recovery operations include (but are not limited to) when an unhealthy compute node is rebooted or a +// compute node disappeared due to host failure. Retries due to recovery operations are independent of and +// are not counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal retry due +// to a recovery operation may occur. Because of this, all tasks should be idempotent. This means tasks +// need to tolerate being interrupted and restarted without causing any corruption or duplicate data. The +// best practice for long running tasks is to use some form of checkpointing. +type CloudTask struct { + autorest.Response `json:"-"` + // ID - The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. + ID *string `json:"id,omitempty"` + // DisplayName - The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + DisplayName *string `json:"displayName,omitempty"` + URL *string `json:"url,omitempty"` + // ETag - This is an opaque string. You can use it to detect whether the task has changed between requests. In particular, you can pass the ETag when updating a task to specify that your changes should take effect only if nobody else has modified the task in the meantime. + ETag *string `json:"eTag,omitempty"` + LastModified *date.Time `json:"lastModified,omitempty"` + CreationTime *date.Time `json:"creationTime,omitempty"` + // ExitConditions - How the Batch service should respond when the task completes. + ExitConditions *ExitConditions `json:"exitConditions,omitempty"` + // State - Possible values include: 'TaskStateActive', 'TaskStatePreparing', 'TaskStateRunning', 'TaskStateCompleted' + State TaskState `json:"state,omitempty"` + StateTransitionTime *date.Time `json:"stateTransitionTime,omitempty"` + // PreviousState - This property is not set if the task is in its initial Active state. Possible values include: 'TaskStateActive', 'TaskStatePreparing', 'TaskStateRunning', 'TaskStateCompleted' + PreviousState TaskState `json:"previousState,omitempty"` + // PreviousStateTransitionTime - This property is not set if the task is in its initial Active state. + PreviousStateTransitionTime *date.Time `json:"previousStateTransitionTime,omitempty"` + // CommandLine - For multi-instance tasks, the command line is executed as the primary task, after the primary task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + CommandLine *string `json:"commandLine,omitempty"` + // ContainerSettings - If the pool that will run this task has containerConfiguration set, this must be set as well. If the pool that will run this task doesn't have containerConfiguration set, this must not be set.
When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all task environment variables are mapped into the container, and the task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. + ContainerSettings *TaskContainerSettings `json:"containerSettings,omitempty"` + // ResourceFiles - For multi-instance tasks, the resource files will only be downloaded to the compute node on which the primary task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. + ResourceFiles *[]ResourceFile `json:"resourceFiles,omitempty"` + // OutputFiles - For multi-instance tasks, the files will only be uploaded from the compute node on which the primary task is executed. + OutputFiles *[]OutputFile `json:"outputFiles,omitempty"` + EnvironmentSettings *[]EnvironmentSetting `json:"environmentSettings,omitempty"` + AffinityInfo *AffinityInformation `json:"affinityInfo,omitempty"` + Constraints *TaskConstraints `json:"constraints,omitempty"` + // UserIdentity - If omitted, the task runs as a non-administrative user unique to the task. + UserIdentity *UserIdentity `json:"userIdentity,omitempty"` + ExecutionInfo *TaskExecutionInformation `json:"executionInfo,omitempty"` + NodeInfo *ComputeNodeInformation `json:"nodeInfo,omitempty"` + MultiInstanceSettings *MultiInstanceSettings `json:"multiInstanceSettings,omitempty"` + Stats *TaskStatistics `json:"stats,omitempty"` + // DependsOn - This task will not be scheduled until all tasks that it depends on have completed successfully. If any of those tasks fail and exhaust their retry counts, this task will never be scheduled. + DependsOn *TaskDependencies `json:"dependsOn,omitempty"` + // ApplicationPackageReferences - Application packages are downloaded and deployed to a shared directory, not the task working directory. Therefore, if a referenced package is already on the compute node, and is up to date, then it is not re-downloaded; the existing copy on the compute node is used. If a referenced application package cannot be installed, for example because the package has been deleted or because download failed, the task fails. + ApplicationPackageReferences *[]ApplicationPackageReference `json:"applicationPackageReferences,omitempty"` + // AuthenticationTokenSettings - If this property is set, the Batch service provides the task with an authentication token which can be used to authenticate Batch service operations without requiring an account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the task can carry out using the token depend on the settings. For example, a task can request job permissions in order to add other tasks to the job, or check the status of the job or of other tasks under the job. + AuthenticationTokenSettings *AuthenticationTokenSettings `json:"authenticationTokenSettings,omitempty"` +} + +// CloudTaskListResult ... 
+type CloudTaskListResult struct { + autorest.Response `json:"-"` + Value *[]CloudTask `json:"value,omitempty"` + OdataNextLink *string `json:"odata.nextLink,omitempty"` +} + +// CloudTaskListResultIterator provides access to a complete listing of CloudTask values. +type CloudTaskListResultIterator struct { + i int + page CloudTaskListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *CloudTaskListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudTaskListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *CloudTaskListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter CloudTaskListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter CloudTaskListResultIterator) Response() CloudTaskListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter CloudTaskListResultIterator) Value() CloudTask { + if !iter.page.NotDone() { + return CloudTask{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the CloudTaskListResultIterator type. +func NewCloudTaskListResultIterator(page CloudTaskListResultPage) CloudTaskListResultIterator { + return CloudTaskListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (ctlr CloudTaskListResult) IsEmpty() bool { + return ctlr.Value == nil || len(*ctlr.Value) == 0 +} + +// cloudTaskListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (ctlr CloudTaskListResult) cloudTaskListResultPreparer(ctx context.Context) (*http.Request, error) { + if ctlr.OdataNextLink == nil || len(to.String(ctlr.OdataNextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(ctlr.OdataNextLink))) +} + +// CloudTaskListResultPage contains a page of CloudTask values. +type CloudTaskListResultPage struct { + fn func(context.Context, CloudTaskListResult) (CloudTaskListResult, error) + ctlr CloudTaskListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *CloudTaskListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudTaskListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.ctlr) + if err != nil { + return err + } + page.ctlr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *CloudTaskListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page CloudTaskListResultPage) NotDone() bool { + return !page.ctlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page CloudTaskListResultPage) Response() CloudTaskListResult { + return page.ctlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page CloudTaskListResultPage) Values() []CloudTask { + if page.ctlr.IsEmpty() { + return nil + } + return *page.ctlr.Value +} + +// Creates a new instance of the CloudTaskListResultPage type. +func NewCloudTaskListResultPage(getNextPage func(context.Context, CloudTaskListResult) (CloudTaskListResult, error)) CloudTaskListResultPage { + return CloudTaskListResultPage{fn: getNextPage} +} + +// CloudTaskListSubtasksResult ... +type CloudTaskListSubtasksResult struct { + autorest.Response `json:"-"` + Value *[]SubtaskInformation `json:"value,omitempty"` +} + +// ComputeNode ... +type ComputeNode struct { + autorest.Response `json:"-"` + // ID - Every node that is added to a pool is assigned a unique ID. Whenever a node is removed from a pool, all of its local files are deleted, and the ID is reclaimed and could be reused for new nodes. + ID *string `json:"id,omitempty"` + URL *string `json:"url,omitempty"` + // State - The low-priority node has been preempted. Tasks which were running on the node when it was preempted will be rescheduled when another node becomes available. Possible values include: 'Idle', 'Rebooting', 'Reimaging', 'Running', 'Unusable', 'Creating', 'Starting', 'WaitingForStartTask', 'StartTaskFailed', 'Unknown', 'LeavingPool', 'Offline', 'Preempted' + State ComputeNodeState `json:"state,omitempty"` + // SchedulingState - Possible values include: 'Enabled', 'Disabled' + SchedulingState SchedulingState `json:"schedulingState,omitempty"` + StateTransitionTime *date.Time `json:"stateTransitionTime,omitempty"` + // LastBootTime - This property may not be present if the node state is unusable. + LastBootTime *date.Time `json:"lastBootTime,omitempty"` + // AllocationTime - This is the time when the node was initially allocated and doesn't change once set. It is not updated when the node is service healed or preempted. + AllocationTime *date.Time `json:"allocationTime,omitempty"` + // IPAddress - Every node that is added to a pool is assigned a unique IP address. Whenever a node is removed from a pool, all of its local files are deleted, and the IP address is reclaimed and could be reused for new nodes. + IPAddress *string `json:"ipAddress,omitempty"` + // AffinityID - Note that this is just a soft affinity. 
If the target node is busy or unavailable at the time the task is scheduled, then the task will be scheduled elsewhere. + AffinityID *string `json:"affinityId,omitempty"` + // VMSize - For information about available sizes of virtual machines in pools, see Choose a VM size for compute nodes in an Azure Batch pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + VMSize *string `json:"vmSize,omitempty"` + TotalTasksRun *int32 `json:"totalTasksRun,omitempty"` + RunningTasksCount *int32 `json:"runningTasksCount,omitempty"` + TotalTasksSucceeded *int32 `json:"totalTasksSucceeded,omitempty"` + // RecentTasks - This property is present only if at least one task has run on this node since it was assigned to the pool. + RecentTasks *[]TaskInformation `json:"recentTasks,omitempty"` + StartTask *StartTask `json:"startTask,omitempty"` + StartTaskInfo *StartTaskInformation `json:"startTaskInfo,omitempty"` + // CertificateReferences - For Windows compute nodes, the Batch service installs the certificates to the specified certificate store and location. For Linux compute nodes, the certificates are stored in a directory inside the task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this location. For certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and certificates are placed in that directory. + CertificateReferences *[]CertificateReference `json:"certificateReferences,omitempty"` + Errors *[]ComputeNodeError `json:"errors,omitempty"` + IsDedicated *bool `json:"isDedicated,omitempty"` + EndpointConfiguration *ComputeNodeEndpointConfiguration `json:"endpointConfiguration,omitempty"` + NodeAgentInfo *NodeAgentInformation `json:"nodeAgentInfo,omitempty"` +} + +// ComputeNodeEndpointConfiguration ... +type ComputeNodeEndpointConfiguration struct { + InboundEndpoints *[]InboundEndpoint `json:"inboundEndpoints,omitempty"` +} + +// ComputeNodeError ... +type ComputeNodeError struct { + Code *string `json:"code,omitempty"` + Message *string `json:"message,omitempty"` + ErrorDetails *[]NameValuePair `json:"errorDetails,omitempty"` +} + +// ComputeNodeGetRemoteLoginSettingsResult ... +type ComputeNodeGetRemoteLoginSettingsResult struct { + autorest.Response `json:"-"` + RemoteLoginIPAddress *string `json:"remoteLoginIPAddress,omitempty"` + RemoteLoginPort *int32 `json:"remoteLoginPort,omitempty"` +} + +// ComputeNodeInformation ... +type ComputeNodeInformation struct { + AffinityID *string `json:"affinityId,omitempty"` + NodeURL *string `json:"nodeUrl,omitempty"` + PoolID *string `json:"poolId,omitempty"` + NodeID *string `json:"nodeId,omitempty"` + TaskRootDirectory *string `json:"taskRootDirectory,omitempty"` + TaskRootDirectoryURL *string `json:"taskRootDirectoryUrl,omitempty"` +} + +// ComputeNodeListResult ... +type ComputeNodeListResult struct { + autorest.Response `json:"-"` + Value *[]ComputeNode `json:"value,omitempty"` + OdataNextLink *string `json:"odata.nextLink,omitempty"` +} + +// ComputeNodeListResultIterator provides access to a complete listing of ComputeNode values. +type ComputeNodeListResultIterator struct { + i int + page ComputeNodeListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
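+// An illustrative usage sketch (not part of the generated code): walking the
+// listing one ComputeNode at a time with the ComputeNodeListResultIterator
+// defined in this file. The page value and ctx are assumed to come from
+// elsewhere (for example, a List call on the ComputeNode client).
+//
+//    iter := NewComputeNodeListResultIterator(page)
+//    for iter.NotDone() {
+//        node := iter.Value()
+//        _ = node // process the ComputeNode value
+//        if err := iter.NextWithContext(ctx); err != nil {
+//            break // hypothetical error handling for this sketch
+//        }
+//    }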
+func (iter *ComputeNodeListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ComputeNodeListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ComputeNodeListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ComputeNodeListResultIterator) Response() ComputeNodeListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ComputeNodeListResultIterator) Value() ComputeNode { + if !iter.page.NotDone() { + return ComputeNode{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ComputeNodeListResultIterator type. +func NewComputeNodeListResultIterator(page ComputeNodeListResultPage) ComputeNodeListResultIterator { + return ComputeNodeListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (cnlr ComputeNodeListResult) IsEmpty() bool { + return cnlr.Value == nil || len(*cnlr.Value) == 0 +} + +// computeNodeListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (cnlr ComputeNodeListResult) computeNodeListResultPreparer(ctx context.Context) (*http.Request, error) { + if cnlr.OdataNextLink == nil || len(to.String(cnlr.OdataNextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(cnlr.OdataNextLink))) +} + +// ComputeNodeListResultPage contains a page of ComputeNode values. +type ComputeNodeListResultPage struct { + fn func(context.Context, ComputeNodeListResult) (ComputeNodeListResult, error) + cnlr ComputeNodeListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ComputeNodeListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.cnlr) + if err != nil { + return err + } + page.cnlr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. 
+func (page *ComputeNodeListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ComputeNodeListResultPage) NotDone() bool { + return !page.cnlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ComputeNodeListResultPage) Response() ComputeNodeListResult { + return page.cnlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ComputeNodeListResultPage) Values() []ComputeNode { + if page.cnlr.IsEmpty() { + return nil + } + return *page.cnlr.Value +} + +// Creates a new instance of the ComputeNodeListResultPage type. +func NewComputeNodeListResultPage(getNextPage func(context.Context, ComputeNodeListResult) (ComputeNodeListResult, error)) ComputeNodeListResultPage { + return ComputeNodeListResultPage{fn: getNextPage} +} + +// ComputeNodeUser ... +type ComputeNodeUser struct { + Name *string `json:"name,omitempty"` + // IsAdmin - The default value is false. + IsAdmin *bool `json:"isAdmin,omitempty"` + // ExpiryTime - If omitted, the default is 1 day from the current time. For Linux compute nodes, the expiryTime has a precision up to a day. + ExpiryTime *date.Time `json:"expiryTime,omitempty"` + // Password - The password is required for Windows nodes (those created with 'cloudServiceConfiguration', or created with 'virtualMachineConfiguration' using a Windows image reference). For Linux compute nodes, the password can optionally be specified along with the sshPublicKey property. + Password *string `json:"password,omitempty"` + // SSHPublicKey - The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux nodes. If this is specified for a Windows node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + SSHPublicKey *string `json:"sshPublicKey,omitempty"` +} + +// ContainerConfiguration ... +type ContainerConfiguration struct { + Type *string `json:"type,omitempty"` + // ContainerImageNames - This is the full image reference, as would be specified to "docker pull". An image will be sourced from the default Docker registry unless the image is fully qualified with an alternative registry. + ContainerImageNames *[]string `json:"containerImageNames,omitempty"` + // ContainerRegistries - If any images must be downloaded from a private registry which requires credentials, then those credentials must be provided here. + ContainerRegistries *[]ContainerRegistry `json:"containerRegistries,omitempty"` +} + +// ContainerRegistry ... +type ContainerRegistry struct { + // RegistryServer - If omitted, the default is "docker.io". + RegistryServer *string `json:"registryServer,omitempty"` + UserName *string `json:"username,omitempty"` + Password *string `json:"password,omitempty"` +} + +// DataDisk ... +type DataDisk struct { + // Lun - The lun is used to uniquely identify each data disk. If attaching multiple disks, each should have a distinct lun. + Lun *int32 `json:"lun,omitempty"` + // Caching - The default value for caching is readwrite. For information about the caching options see: https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. 
Possible values include: 'None', 'ReadOnly', 'ReadWrite' + Caching CachingType `json:"caching,omitempty"` + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // StorageAccountType - If omitted, the default is "standard_lrs". Possible values include: 'StandardLRS', 'PremiumLRS' + StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"` +} + +// DeleteCertificateError ... +type DeleteCertificateError struct { + Code *string `json:"code,omitempty"` + Message *string `json:"message,omitempty"` + // Values - This list includes details such as the active pools and nodes referencing this certificate. However, if a large number of resources reference the certificate, the list contains only about the first hundred. + Values *[]NameValuePair `json:"values,omitempty"` +} + +// EnvironmentSetting ... +type EnvironmentSetting struct { + Name *string `json:"name,omitempty"` + Value *string `json:"value,omitempty"` +} + +// Error ... +type Error struct { + Code *string `json:"code,omitempty"` + Message *ErrorMessage `json:"message,omitempty"` + Values *[]ErrorDetail `json:"values,omitempty"` +} + +// ErrorDetail ... +type ErrorDetail struct { + Key *string `json:"key,omitempty"` + Value *string `json:"value,omitempty"` +} + +// ErrorMessage ... +type ErrorMessage struct { + Lang *string `json:"lang,omitempty"` + Value *string `json:"value,omitempty"` +} + +// ExitCodeMapping ... +type ExitCodeMapping struct { + Code *int32 `json:"code,omitempty"` + ExitOptions *ExitOptions `json:"exitOptions,omitempty"` +} + +// ExitCodeRangeMapping ... +type ExitCodeRangeMapping struct { + Start *int32 `json:"start,omitempty"` + End *int32 `json:"end,omitempty"` + ExitOptions *ExitOptions `json:"exitOptions,omitempty"` +} + +// ExitConditions ... +type ExitConditions struct { + ExitCodes *[]ExitCodeMapping `json:"exitCodes,omitempty"` + ExitCodeRanges *[]ExitCodeRangeMapping `json:"exitCodeRanges,omitempty"` + PreProcessingError *ExitOptions `json:"preProcessingError,omitempty"` + // FileUploadError - If the task exited with an exit code that was specified via exitCodes or exitCodeRanges, and then encountered a file upload error, then the action specified by the exit code takes precedence. + FileUploadError *ExitOptions `json:"fileUploadError,omitempty"` + // Default - This value is used if the task exits with any nonzero exit code not listed in the exitCodes or exitCodeRanges collection, with a pre-processing error if the preProcessingError property is not present, or with a file upload error if the fileUploadError property is not present. If you want non-default behavior on exit code 0, you must list it explicitly using the exitCodes or exitCodeRanges collection. + Default *ExitOptions `json:"default,omitempty"` +} + +// ExitOptions ... +type ExitOptions struct { + // JobAction - The default is none for exit code 0 and terminate for all other exit conditions. If the job's onTaskFailed property is noaction, then specifying this property returns an error and the add task request fails with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). Possible values include: 'JobActionNone', 'JobActionDisable', 'JobActionTerminate' + JobAction JobAction `json:"jobAction,omitempty"` + // DependencyAction - The default is 'satisfy' for exit code 0, and 'block' for all other exit conditions. 
If the job's usesTaskDependencies property is set to false, then specifying the dependencyAction property returns an error and the add task request fails with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). Possible values include: 'Satisfy', 'Block' + DependencyAction DependencyAction `json:"dependencyAction,omitempty"` +} + +// FileProperties ... +type FileProperties struct { + // CreationTime - The creation time is not returned for files on Linux compute nodes. + CreationTime *date.Time `json:"creationTime,omitempty"` + LastModified *date.Time `json:"lastModified,omitempty"` + ContentLength *int64 `json:"contentLength,omitempty"` + ContentType *string `json:"contentType,omitempty"` + // FileMode - The file mode is returned only for files on Linux compute nodes. + FileMode *string `json:"fileMode,omitempty"` +} + +// ImageInformation ... +type ImageInformation struct { + NodeAgentSKUID *string `json:"nodeAgentSKUId,omitempty"` + ImageReference *ImageReference `json:"imageReference,omitempty"` + // OsType - Possible values include: 'Linux', 'Windows' + OsType OSType `json:"osType,omitempty"` + // Capabilities - Not every capability of the Image is listed. Capabilities in this list are considered of special interest and are generally related to integration with other features in the Azure Batch service. + Capabilities *[]string `json:"capabilities,omitempty"` + BatchSupportEndOfLife *date.Time `json:"batchSupportEndOfLife,omitempty"` + // VerificationType - Possible values include: 'Verified', 'Unverified' + VerificationType VerificationType `json:"verificationType,omitempty"` +} + +// ImageReference ... +type ImageReference struct { + // Publisher - For example, Canonical or MicrosoftWindowsServer. + Publisher *string `json:"publisher,omitempty"` + // Offer - For example, UbuntuServer or WindowsServer. + Offer *string `json:"offer,omitempty"` + // Sku - For example, 14.04.0-LTS or 2012-R2-Datacenter. + Sku *string `json:"sku,omitempty"` + // Version - A value of 'latest' can be specified to select the latest version of an Image. If omitted, the default is 'latest'. + Version *string `json:"version,omitempty"` + // VirtualMachineImageID - This property is mutually exclusive with other ImageReference properties. The Virtual Machine Image must be in the same region and subscription as the Azure Batch account. For information about the firewall settings for the Batch node agent to communicate with the Batch service see https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + VirtualMachineImageID *string `json:"virtualMachineImageId,omitempty"` +} + +// InboundEndpoint ... +type InboundEndpoint struct { + Name *string `json:"name,omitempty"` + // Protocol - Possible values include: 'TCP', 'UDP' + Protocol InboundEndpointProtocol `json:"protocol,omitempty"` + PublicIPAddress *string `json:"publicIPAddress,omitempty"` + PublicFQDN *string `json:"publicFQDN,omitempty"` + FrontendPort *int32 `json:"frontendPort,omitempty"` + BackendPort *int32 `json:"backendPort,omitempty"` +} + +// InboundNATPool ... +type InboundNATPool struct { + // Name - The name must be unique within a Batch pool, can contain letters, numbers, underscores, periods, and hyphens. Names must start with a letter or number, must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values are provided the request fails with HTTP status code 400. 
+ Name *string `json:"name,omitempty"` + // Protocol - Possible values include: 'TCP', 'UDP' + Protocol InboundEndpointProtocol `json:"protocol,omitempty"` + // BackendPort - This must be unique within a Batch pool. Acceptable values are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400. + BackendPort *int32 `json:"backendPort,omitempty"` + // FrontendPortRangeStart - Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved. All ranges within a pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. + FrontendPortRangeStart *int32 `json:"frontendPortRangeStart,omitempty"` + // FrontendPortRangeEnd - Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved by the Batch service. All ranges within a pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. + FrontendPortRangeEnd *int32 `json:"frontendPortRangeEnd,omitempty"` + // NetworkSecurityGroupRules - The maximum number of rules that can be specified across all the endpoints on a Batch pool is 25. If no network security group rules are specified, a default rule will be created to allow inbound access to the specified backendPort. If the maximum number of network security group rules is exceeded the request fails with HTTP status code 400. + NetworkSecurityGroupRules *[]NetworkSecurityGroupRule `json:"networkSecurityGroupRules,omitempty"` +} + +// JobAddParameter ... +type JobAddParameter struct { + // ID - The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an account that differ only by case). + ID *string `json:"id,omitempty"` + // DisplayName - The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + DisplayName *string `json:"displayName,omitempty"` + // Priority - Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. + Priority *int32 `json:"priority,omitempty"` + // Constraints - The execution constraints for the job. + Constraints *JobConstraints `json:"constraints,omitempty"` + // JobManagerTask - If the job does not specify a Job Manager task, the user must explicitly add tasks to the job. If the job does specify a Job Manager task, the Batch service creates the Job Manager task when the job is created, and will try to schedule the Job Manager task before scheduling other tasks in the job. The Job Manager task's typical purpose is to control and/or monitor job execution, for example by deciding what additional tasks to run, determining when the work is complete, etc. (However, a Job Manager task is not restricted to these activities - it is a fully-fledged task in the system and can perform whatever actions are required for the job.) For example, a Job Manager task might download a file specified as a parameter, analyze the contents of that file and submit additional tasks based on those contents.
+ JobManagerTask *JobManagerTask `json:"jobManagerTask,omitempty"` + // JobPreparationTask - If a job has a Job Preparation task, the Batch service will run the Job Preparation task on a compute node before starting any tasks of that job on that compute node. + JobPreparationTask *JobPreparationTask `json:"jobPreparationTask,omitempty"` + // JobReleaseTask - A Job Release task cannot be specified without also specifying a Job Preparation task for the job. The Batch service runs the Job Release task on the compute nodes that have run the Job Preparation task. The primary purpose of the Job Release task is to undo changes to compute nodes made by the Job Preparation task. Example activities include deleting local files, or shutting down services that were started as part of job preparation. + JobReleaseTask *JobReleaseTask `json:"jobReleaseTask,omitempty"` + // CommonEnvironmentSettings - Individual tasks can override an environment setting specified here by specifying the same setting name with a different value. + CommonEnvironmentSettings *[]EnvironmentSetting `json:"commonEnvironmentSettings,omitempty"` + PoolInfo *PoolInformation `json:"poolInfo,omitempty"` + // OnAllTasksComplete - Note that if a job contains no tasks, then all tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the job properties to set onAllTasksComplete to terminatejob once you have finished adding tasks. The default is noaction. Possible values include: 'NoAction', 'TerminateJob' + OnAllTasksComplete OnAllTasksComplete `json:"onAllTasksComplete,omitempty"` + // OnTaskFailure - A task is considered to have failed if has a failureInfo. A failureInfo is set if the task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the task, for example due to a resource file download error. The default is noaction. Possible values include: 'OnTaskFailureNoAction', 'OnTaskFailurePerformExitOptionsJobAction' + OnTaskFailure OnTaskFailure `json:"onTaskFailure,omitempty"` + // Metadata - The Batch service does not assign any meaning to metadata; it is solely for the use of user code. + Metadata *[]MetadataItem `json:"metadata,omitempty"` + UsesTaskDependencies *bool `json:"usesTaskDependencies,omitempty"` + NetworkConfiguration *JobNetworkConfiguration `json:"networkConfiguration,omitempty"` +} + +// JobConstraints ... +type JobConstraints struct { + // MaxWallClockTime - If the job does not complete within the time limit, the Batch service terminates it and any tasks that are still running. In this case, the termination reason will be MaxWallClockTimeExpiry. If this property is not specified, there is no time limit on how long the job may run. + MaxWallClockTime *string `json:"maxWallClockTime,omitempty"` + // MaxTaskRetryCount - Note that this value specifically controls the number of retries. The Batch service will try each task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries a task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry tasks. If the maximum retry count is -1, the Batch service retries tasks without limit. The default value is 0 (no retries). + MaxTaskRetryCount *int32 `json:"maxTaskRetryCount,omitempty"` +} + +// JobDisableParameter ... 
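// Pulling the JobConstraints semantics above into code: maxTaskRetryCount counts retries only, so 3
// means up to four executions of a task in total, and maxWallClockTime is an ISO 8601 duration carried
// as a string. Sketch only, under the same package/import assumptions; PoolInformation is defined
// elsewhere in this file, so its PoolID field name here is an assumption.
package sketch

import (
	"github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
	"github.com/Azure/go-autorest/autorest/to"
)

func nightlyJob() batch.JobAddParameter {
	return batch.JobAddParameter{
		ID:       to.StringPtr("nightly-render"),
		Priority: to.Int32Ptr(0), // -1000 (lowest) .. 1000 (highest); 0 is the default
		Constraints: &batch.JobConstraints{
			MaxWallClockTime:  to.StringPtr("PT2H"), // terminate the job and any running tasks after 2 hours
			MaxTaskRetryCount: to.Int32Ptr(3),       // one initial try plus up to 3 retries per task
		},
		PoolInfo: &batch.PoolInformation{PoolID: to.StringPtr("render-pool")}, // assumed field name
	}
}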
+type JobDisableParameter struct { + // DisableTasks - Possible values include: 'DisableJobOptionRequeue', 'DisableJobOptionTerminate', 'DisableJobOptionWait' + DisableTasks DisableJobOption `json:"disableTasks,omitempty"` +} + +// JobExecutionInformation ... +type JobExecutionInformation struct { + // StartTime - This is the time at which the job was created. + StartTime *date.Time `json:"startTime,omitempty"` + // EndTime - This property is set only if the job is in the completed state. + EndTime *date.Time `json:"endTime,omitempty"` + // PoolID - This element contains the actual pool where the job is assigned. When you get job details from the service, they also contain a poolInfo element, which contains the pool configuration data from when the job was added or updated. That poolInfo element may also contain a poolId element. If it does, the two IDs are the same. If it does not, it means the job ran on an auto pool, and this property contains the ID of that auto pool. + PoolID *string `json:"poolId,omitempty"` + // SchedulingError - This property is not set if there was no error starting the job. + SchedulingError *JobSchedulingError `json:"schedulingError,omitempty"` + // TerminateReason - This property is set only if the job is in the completed state. If the Batch service terminates the job, it sets the reason as follows: JMComplete - the Job Manager task completed, and killJobOnCompletion was set to true. MaxWallClockTimeExpiry - the job reached its maxWallClockTime constraint. TerminateJobSchedule - the job ran as part of a schedule, and the schedule terminated. AllTasksComplete - the job's onAllTasksComplete attribute is set to terminatejob, and all tasks in the job are complete. TaskFailed - the job's onTaskFailure attribute is set to performExitOptionsJobAction, and a task in the job failed with an exit condition that specified a jobAction of terminatejob. Any other string is a user-defined reason specified in a call to the 'Terminate a job' operation. + TerminateReason *string `json:"terminateReason,omitempty"` +} + +// JobManagerTask the Job Manager task is automatically started when the job is created. The Batch service +// tries to schedule the Job Manager task before any other tasks in the job. When shrinking a pool, the +// Batch service tries to preserve compute nodes where Job Manager tasks are running for as long as +// possible (that is, nodes running 'normal' tasks are removed before nodes running Job Manager tasks). +// When a Job Manager task fails and needs to be restarted, the system tries to schedule it at the highest +// priority. If there are no idle nodes available, the system may terminate one of the running tasks in the +// pool and return it to the queue in order to make room for the Job Manager task to restart. Note that a +// Job Manager task in one job does not have priority over tasks in other jobs. Across jobs, only job level +// priorities are observed. For example, if a Job Manager in a priority 0 job needs to be restarted, it +// will not displace tasks of a priority 1 job. Batch will retry tasks when a recovery operation is +// triggered on a compute node. Examples of recovery operations include (but are not limited to) when an +// unhealthy compute node is rebooted or a compute node disappeared due to host failure. Retries due to +// recovery operations are independent of and are not counted against the maxTaskRetryCount. Even if the +// maxTaskRetryCount is 0, an internal retry due to a recovery operation may occur. 
Because of this, all +// tasks should be idempotent. This means tasks need to tolerate being interrupted and restarted without +// causing any corruption or duplicate data. The best practice for long running tasks is to use some form +// of checkpointing. +type JobManagerTask struct { + // ID - The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. + ID *string `json:"id,omitempty"` + // DisplayName - It need not be unique and can contain any Unicode characters up to a maximum length of 1024. + DisplayName *string `json:"displayName,omitempty"` + // CommandLine - The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + CommandLine *string `json:"commandLine,omitempty"` + // ContainerSettings - If the pool that will run this task has containerConfiguration set, this must be set as well. If the pool that will run this task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all task environment variables are mapped into the container, and the task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. + ContainerSettings *TaskContainerSettings `json:"containerSettings,omitempty"` + // ResourceFiles - Files listed under this element are located in the task's working directory. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. + ResourceFiles *[]ResourceFile `json:"resourceFiles,omitempty"` + // OutputFiles - For multi-instance tasks, the files will only be uploaded from the compute node on which the primary task is executed. + OutputFiles *[]OutputFile `json:"outputFiles,omitempty"` + EnvironmentSettings *[]EnvironmentSetting `json:"environmentSettings,omitempty"` + Constraints *TaskConstraints `json:"constraints,omitempty"` + // KillJobOnCompletion - If true, when the Job Manager task completes, the Batch service marks the job as complete. If any tasks are still running at this time (other than Job Release), those tasks are terminated. If false, the completion of the Job Manager task does not affect the job status. In this case, you should either use the onAllTasksComplete attribute to terminate the job, or have a client or user terminate the job explicitly. An example of this is if the Job Manager creates a set of tasks but then takes no further role in their execution. The default value is true. 
If you are using the onAllTasksComplete and onTaskFailure attributes to control job lifetime, and using the Job Manager task only to create the tasks for the job (not to monitor progress), then it is important to set killJobOnCompletion to false. + KillJobOnCompletion *bool `json:"killJobOnCompletion,omitempty"` + // UserIdentity - If omitted, the task runs as a non-administrative user unique to the task. + UserIdentity *UserIdentity `json:"userIdentity,omitempty"` + // RunExclusive - If true, no other tasks will run on the same compute node for as long as the Job Manager is running. If false, other tasks can run simultaneously with the Job Manager on a compute node. The Job Manager task counts normally against the node's concurrent task limit, so this is only relevant if the node allows multiple concurrent tasks. The default value is true. + RunExclusive *bool `json:"runExclusive,omitempty"` + // ApplicationPackageReferences - Application packages are downloaded and deployed to a shared directory, not the task working directory. Therefore, if a referenced package is already on the compute node, and is up to date, then it is not re-downloaded; the existing copy on the compute node is used. If a referenced application package cannot be installed, for example because the package has been deleted or because download failed, the task fails. + ApplicationPackageReferences *[]ApplicationPackageReference `json:"applicationPackageReferences,omitempty"` + // AuthenticationTokenSettings - If this property is set, the Batch service provides the task with an authentication token which can be used to authenticate Batch service operations without requiring an account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the task can carry out using the token depend on the settings. For example, a task can request job permissions in order to add other tasks to the job, or check the status of the job or of other tasks under the job. + AuthenticationTokenSettings *AuthenticationTokenSettings `json:"authenticationTokenSettings,omitempty"` + // AllowLowPriorityNode - The default value is true. + AllowLowPriorityNode *bool `json:"allowLowPriorityNode,omitempty"` +} + +// JobNetworkConfiguration ... +type JobNetworkConfiguration struct { + // SubnetID - The virtual network must be in the same region and subscription as the Azure Batch account. The specified subnet should have enough free IP addresses to accommodate the number of nodes which will run tasks from the job. This can be up to the number of nodes in the pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule tasks on the compute nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the compute nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the compute nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. 
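// Two points from the JobManagerTask documentation above are worth showing in code: the command line
// does not run under a shell unless you invoke one explicitly, and killJobOnCompletion decides whether
// the job ends when the manager does. A sketch under the same assumed package and imports.
package sketch

import (
	"github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
	"github.com/Azure/go-autorest/autorest/to"
)

func jobManager() batch.JobManagerTask {
	return batch.JobManagerTask{
		ID:          to.StringPtr("jobmanager"),
		CommandLine: to.StringPtr(`/bin/sh -c "python3 orchestrate.py"`), // shell invoked explicitly so environment variables expand
		// true (the default) marks the job complete when this task finishes; use false when
		// onAllTasksComplete/onTaskFailure drive the job lifetime instead.
		KillJobOnCompletion: to.BoolPtr(true),
		RunExclusive:        to.BoolPtr(false), // allow other tasks on the node while the manager runs
	}
}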
Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration + SubnetID *string `json:"subnetId,omitempty"` +} + +// JobPatchParameter ... +type JobPatchParameter struct { + // Priority - Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If omitted, the priority of the job is left unchanged. + Priority *int32 `json:"priority,omitempty"` + // OnAllTasksComplete - If omitted, the completion behavior is left unchanged. You may not change the value from terminatejob to noaction - that is, once you have engaged automatic job termination, you cannot turn it off again. If you try to do this, the request fails with an 'invalid property value' error response; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). Possible values include: 'NoAction', 'TerminateJob' + OnAllTasksComplete OnAllTasksComplete `json:"onAllTasksComplete,omitempty"` + // Constraints - If omitted, the existing execution constraints are left unchanged. + Constraints *JobConstraints `json:"constraints,omitempty"` + // PoolInfo - You may change the pool for a job only when the job is disabled. The Patch Job call will fail if you include the poolInfo element and the job is not disabled. If you specify an autoPoolSpecification specification in the poolInfo, only the keepAlive property can be updated, and then only if the auto pool has a poolLifetimeOption of job. If omitted, the job continues to run on its current pool. + PoolInfo *PoolInformation `json:"poolInfo,omitempty"` + // Metadata - If omitted, the existing job metadata is left unchanged. + Metadata *[]MetadataItem `json:"metadata,omitempty"` +} + +// JobPreparationAndReleaseTaskExecutionInformation ... +type JobPreparationAndReleaseTaskExecutionInformation struct { + PoolID *string `json:"poolId,omitempty"` + NodeID *string `json:"nodeId,omitempty"` + NodeURL *string `json:"nodeUrl,omitempty"` + JobPreparationTaskExecutionInfo *JobPreparationTaskExecutionInformation `json:"jobPreparationTaskExecutionInfo,omitempty"` + // JobReleaseTaskExecutionInfo - This property is set only if the Job Release task has run on the node. + JobReleaseTaskExecutionInfo *JobReleaseTaskExecutionInformation `json:"jobReleaseTaskExecutionInfo,omitempty"` +} + +// JobPreparationTask you can use Job Preparation to prepare a compute node to run tasks for the job. +// Activities commonly performed in Job Preparation include: Downloading common resource files used by all +// the tasks in the job. The Job Preparation task can download these common resource files to the shared +// location on the compute node. (AZ_BATCH_NODE_ROOT_DIR\shared), or starting a local service on the +// compute node so that all tasks of that job can communicate with it. If the Job Preparation task fails +// (that is, exhausts its retry count before exiting with exit code 0), Batch will not run tasks of this +// job on the compute node. The node remains ineligible to run tasks of this job until it is reimaged. The +// node remains active and can be used for other jobs. The Job Preparation task can run multiple times on +// the same compute node. Therefore, you should write the Job Preparation task to handle re-execution. 
If +// the compute node is rebooted, the Job Preparation task is run again on the node before scheduling any +// other task of the job, if rerunOnNodeRebootAfterSuccess is true or if the Job Preparation task did not +// previously complete. If the compute node is reimaged, the Job Preparation task is run again before +// scheduling any task of the job. Batch will retry tasks when a recovery operation is triggered on a +// compute node. Examples of recovery operations include (but are not limited to) when an unhealthy compute +// node is rebooted or a compute node disappeared due to host failure. Retries due to recovery operations +// are independent of and are not counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is +// 0, an internal retry due to a recovery operation may occur. Because of this, all tasks should be +// idempotent. This means tasks need to tolerate being interrupted and restarted without causing any +// corruption or duplicate data. The best practice for long running tasks is to use some form of +// checkpointing. +type JobPreparationTask struct { + // ID - The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobpreparation'. No other task in the job can have the same ID as the Job Preparation task. If you try to submit a task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). + ID *string `json:"id,omitempty"` + // CommandLine - The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + CommandLine *string `json:"commandLine,omitempty"` + // ContainerSettings - When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all task environment variables are mapped into the container, and the task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. + ContainerSettings *TaskContainerSettings `json:"containerSettings,omitempty"` + // ResourceFiles - Files listed under this element are located in the task's working directory. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. 
+ ResourceFiles *[]ResourceFile `json:"resourceFiles,omitempty"` + EnvironmentSettings *[]EnvironmentSetting `json:"environmentSettings,omitempty"` + Constraints *TaskConstraints `json:"constraints,omitempty"` + // WaitForSuccess - If true and the Job Preparation task fails on a compute node, the Batch service retries the Job Preparation task up to its maximum retry count (as specified in the constraints element). If the task has still not completed successfully after all retries, then the Batch service will not schedule tasks of the job to the compute node. The compute node remains active and eligible to run tasks of other jobs. If false, the Batch service will not wait for the Job Preparation task to complete. In this case, other tasks of the job can start executing on the compute node while the Job Preparation task is still running; and even if the Job Preparation task fails, new tasks will continue to be scheduled on the node. The default value is true. + WaitForSuccess *bool `json:"waitForSuccess,omitempty"` + // UserIdentity - If omitted, the task runs as a non-administrative user unique to the task on Windows nodes, or a non-administrative user unique to the pool on Linux nodes. + UserIdentity *UserIdentity `json:"userIdentity,omitempty"` + // RerunOnNodeRebootAfterSuccess - The Job Preparation task is always rerun if a compute node is reimaged, or if the Job Preparation task did not complete (e.g. because the reboot occurred while the task was running). Therefore, you should always write a Job Preparation task to be idempotent and to behave correctly if run multiple times. The default value is true. + RerunOnNodeRebootAfterSuccess *bool `json:"rerunOnNodeRebootAfterSuccess,omitempty"` +} + +// JobPreparationTaskExecutionInformation ... +type JobPreparationTaskExecutionInformation struct { + // StartTime - If the task has been restarted or retried, this is the most recent time at which the task started running. + StartTime *date.Time `json:"startTime,omitempty"` + // EndTime - This property is set only if the task is in the Completed state. + EndTime *date.Time `json:"endTime,omitempty"` + // State - Possible values include: 'JobPreparationTaskStateRunning', 'JobPreparationTaskStateCompleted' + State JobPreparationTaskState `json:"state,omitempty"` + TaskRootDirectory *string `json:"taskRootDirectory,omitempty"` + TaskRootDirectoryURL *string `json:"taskRootDirectoryUrl,omitempty"` + // ExitCode - This parameter is returned only if the task is in the completed state. The exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. Note that the exit code may also be generated by the compute node operating system, such as when a process is forcibly terminated. + ExitCode *int32 `json:"exitCode,omitempty"` + // ContainerInfo - This property is set only if the task runs in a container context. + ContainerInfo *TaskContainerExecutionInformation `json:"containerInfo,omitempty"` + // FailureInfo - This property is set only if the task is in the completed state and encountered a failure. + FailureInfo *TaskFailureInformation `json:"failureInfo,omitempty"` + // RetryCount - Task application failures (non-zero exit code) are retried, pre-processing errors (the task could not be run) and file upload errors are not retried. 
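// Because a Job Preparation task can run more than once on the same node (reboot, reimage, recovery
// operations), the documentation above asks for an idempotent command line. A sketch of a prep task
// that is safe to re-run, under the same assumed package and imports; AZ_BATCH_NODE_SHARED_DIR is the
// shared-directory environment variable the comment above refers to.
package sketch

import (
	"github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
	"github.com/Azure/go-autorest/autorest/to"
)

func prepTask() batch.JobPreparationTask {
	return batch.JobPreparationTask{
		ID: to.StringPtr("jobprep"), // the service default would be "jobpreparation"
		// cp -n keeps files that already exist, so a re-run after a reboot is harmless.
		CommandLine:                   to.StringPtr(`/bin/sh -c "mkdir -p $AZ_BATCH_NODE_SHARED_DIR/data && cp -n staged/* $AZ_BATCH_NODE_SHARED_DIR/data/"`),
		WaitForSuccess:                to.BoolPtr(true), // block the job's tasks on this node until prep succeeds
		RerunOnNodeRebootAfterSuccess: to.BoolPtr(true),
	}
}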
The Batch service will retry the task up to the limit specified by the constraints. + RetryCount *int32 `json:"retryCount,omitempty"` + // LastRetryTime - This property is set only if the task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the task has been restarted for reasons other than retry; for example, if the compute node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not. + LastRetryTime *date.Time `json:"lastRetryTime,omitempty"` + // Result - If the value is 'failed', then the details of the failure can be found in the failureInfo property. Possible values include: 'Success', 'Failure' + Result TaskExecutionResult `json:"result,omitempty"` +} + +// JobReleaseTask the Job Release task runs when the job ends, because of one of the following: The user +// calls the Terminate Job API, or the Delete Job API while the job is still active, the job's maximum wall +// clock time constraint is reached, and the job is still active, or the job's Job Manager task completed, +// and the job is configured to terminate when the Job Manager completes. The Job Release task runs on each +// compute node where tasks of the job have run and the Job Preparation task ran and completed. If you +// reimage a compute node after it has run the Job Preparation task, and the job ends without any further +// tasks of the job running on that compute node (and hence the Job Preparation task does not re-run), then +// the Job Release task does not run on that node. If a compute node reboots while the Job Release task is +// still running, the Job Release task runs again when the compute node starts up. The job is not marked as +// complete until all Job Release tasks have completed. The Job Release task runs in the background. It +// does not occupy a scheduling slot; that is, it does not count towards the maxTasksPerNode limit +// specified on the pool. +type JobReleaseTask struct { + // ID - The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobrelease'. No other task in the job can have the same ID as the Job Release task. If you try to submit a task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). + ID *string `json:"id,omitempty"` + // CommandLine - The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + CommandLine *string `json:"commandLine,omitempty"` + // ContainerSettings - When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all task environment variables are mapped into the container, and the task command line is executed in the container. 
Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. + ContainerSettings *TaskContainerSettings `json:"containerSettings,omitempty"` + // ResourceFiles - Files listed under this element are located in the task's working directory. + ResourceFiles *[]ResourceFile `json:"resourceFiles,omitempty"` + EnvironmentSettings *[]EnvironmentSetting `json:"environmentSettings,omitempty"` + MaxWallClockTime *string `json:"maxWallClockTime,omitempty"` + // RetentionTime - The default is 7 days, i.e. the task directory will be retained for 7 days unless the compute node is removed or the job is deleted. + RetentionTime *string `json:"retentionTime,omitempty"` + // UserIdentity - If omitted, the task runs as a non-administrative user unique to the task. + UserIdentity *UserIdentity `json:"userIdentity,omitempty"` +} + +// JobReleaseTaskExecutionInformation ... +type JobReleaseTaskExecutionInformation struct { + // StartTime - If the task has been restarted or retried, this is the most recent time at which the task started running. + StartTime *date.Time `json:"startTime,omitempty"` + // EndTime - This property is set only if the task is in the Completed state. + EndTime *date.Time `json:"endTime,omitempty"` + // State - Possible values include: 'JobReleaseTaskStateRunning', 'JobReleaseTaskStateCompleted' + State JobReleaseTaskState `json:"state,omitempty"` + TaskRootDirectory *string `json:"taskRootDirectory,omitempty"` + TaskRootDirectoryURL *string `json:"taskRootDirectoryUrl,omitempty"` + // ExitCode - This parameter is returned only if the task is in the completed state. The exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. Note that the exit code may also be generated by the compute node operating system, such as when a process is forcibly terminated. + ExitCode *int32 `json:"exitCode,omitempty"` + // ContainerInfo - This property is set only if the task runs in a container context. + ContainerInfo *TaskContainerExecutionInformation `json:"containerInfo,omitempty"` + // FailureInfo - This property is set only if the task is in the completed state and encountered a failure. + FailureInfo *TaskFailureInformation `json:"failureInfo,omitempty"` + // Result - If the value is 'failed', then the details of the failure can be found in the failureInfo property. Possible values include: 'Success', 'Failure' + Result TaskExecutionResult `json:"result,omitempty"` +} + +// JobScheduleAddParameter ... +type JobScheduleAddParameter struct { + // ID - The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an account that differ only by case). + ID *string `json:"id,omitempty"` + // DisplayName - The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + DisplayName *string `json:"displayName,omitempty"` + Schedule *Schedule `json:"schedule,omitempty"` + JobSpecification *JobSpecification `json:"jobSpecification,omitempty"` + // Metadata - The Batch service does not assign any meaning to metadata; it is solely for the use of user code. 
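// A Job Release task exists to undo what the Job Preparation task did, as described above; it requires
// a Job Preparation task on the job and does not occupy a scheduling slot. Sketch, same assumed imports,
// undoing the prep sketch shown earlier.
package sketch

import (
	"github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
	"github.com/Azure/go-autorest/autorest/to"
)

func releaseTask() batch.JobReleaseTask {
	return batch.JobReleaseTask{
		ID:            to.StringPtr("jobrelease"), // the service default would be "jobrelease"
		CommandLine:   to.StringPtr(`/bin/sh -c "rm -rf $AZ_BATCH_NODE_SHARED_DIR/data"`),
		RetentionTime: to.StringPtr("P1D"), // keep the task directory for 1 day instead of the 7-day default
	}
}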
+ Metadata *[]MetadataItem `json:"metadata,omitempty"` +} + +// JobScheduleExecutionInformation ... +type JobScheduleExecutionInformation struct { + // NextRunTime - This property is meaningful only if the schedule is in the active state when the time comes around. For example, if the schedule is disabled, no job will be created at nextRunTime unless the job is enabled before then. + NextRunTime *date.Time `json:"nextRunTime,omitempty"` + // RecentJob - This property is present only if the at least one job has run under the schedule. + RecentJob *RecentJob `json:"recentJob,omitempty"` + // EndTime - This property is set only if the job schedule is in the completed state. + EndTime *date.Time `json:"endTime,omitempty"` +} + +// JobSchedulePatchParameter ... +type JobSchedulePatchParameter struct { + // Schedule - If you do not specify this element, the existing schedule is left unchanged. + Schedule *Schedule `json:"schedule,omitempty"` + // JobSpecification - Updates affect only jobs that are started after the update has taken place. Any currently active job continues with the older specification. + JobSpecification *JobSpecification `json:"jobSpecification,omitempty"` + // Metadata - If you do not specify this element, existing metadata is left unchanged. + Metadata *[]MetadataItem `json:"metadata,omitempty"` +} + +// JobScheduleStatistics ... +type JobScheduleStatistics struct { + URL *string `json:"url,omitempty"` + StartTime *date.Time `json:"startTime,omitempty"` + LastUpdateTime *date.Time `json:"lastUpdateTime,omitempty"` + UserCPUTime *string `json:"userCPUTime,omitempty"` + KernelCPUTime *string `json:"kernelCPUTime,omitempty"` + // WallClockTime - The wall clock time is the elapsed time from when the task started running on a compute node to when it finished (or to the last time the statistics were updated, if the task had not finished by then). If a task was retried, this includes the wall clock time of all the task retries. + WallClockTime *string `json:"wallClockTime,omitempty"` + ReadIOps *int64 `json:"readIOps,omitempty"` + WriteIOps *int64 `json:"writeIOps,omitempty"` + ReadIOGiB *float64 `json:"readIOGiB,omitempty"` + WriteIOGiB *float64 `json:"writeIOGiB,omitempty"` + NumSucceededTasks *int64 `json:"numSucceededTasks,omitempty"` + NumFailedTasks *int64 `json:"numFailedTasks,omitempty"` + NumTaskRetries *int64 `json:"numTaskRetries,omitempty"` + // WaitTime - This value is only reported in the account lifetime statistics; it is not included in the job statistics. + WaitTime *string `json:"waitTime,omitempty"` +} + +// JobScheduleUpdateParameter ... +type JobScheduleUpdateParameter struct { + // Schedule - If you do not specify this element, it is equivalent to passing the default schedule: that is, a single job scheduled to run immediately. + Schedule *Schedule `json:"schedule,omitempty"` + // JobSpecification - Updates affect only jobs that are started after the update has taken place. Any currently active job continues with the older specification. + JobSpecification *JobSpecification `json:"jobSpecification,omitempty"` + // Metadata - If you do not specify this element, it takes the default value of an empty list; in effect, any existing metadata is deleted. + Metadata *[]MetadataItem `json:"metadata,omitempty"` +} + +// JobSchedulingError ... 
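// The practical difference between the two parameter shapes above: Patch leaves omitted elements
// unchanged, while Update resets them (omitted metadata becomes an empty list, an omitted schedule is
// the default "single job, run immediately"). Sketch, same assumed imports.
package sketch

import (
	"github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
	"github.com/Azure/go-autorest/autorest/to"
)

// Patch: change only the metadata; the schedule and job specification stay as they are.
func patchMetadataOnly() batch.JobSchedulePatchParameter {
	return batch.JobSchedulePatchParameter{
		Metadata: &[]batch.MetadataItem{{Name: to.StringPtr("owner"), Value: to.StringPtr("render-team")}},
	}
}

// Update: omitting the schedule here is equivalent to passing the default schedule, so an update
// normally restates every element it wants to keep.
func updateMetadataOnly() batch.JobScheduleUpdateParameter {
	return batch.JobScheduleUpdateParameter{
		Metadata: &[]batch.MetadataItem{{Name: to.StringPtr("owner"), Value: to.StringPtr("render-team")}},
	}
}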
+type JobSchedulingError struct { + // Category - Possible values include: 'UserError', 'ServerError' + Category ErrorCategory `json:"category,omitempty"` + Code *string `json:"code,omitempty"` + Message *string `json:"message,omitempty"` + Details *[]NameValuePair `json:"details,omitempty"` +} + +// JobSpecification ... +type JobSpecification struct { + // Priority - Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. This priority is used as the default for all jobs under the job schedule. You can update a job's priority after it has been created using by using the update job API. + Priority *int32 `json:"priority,omitempty"` + // DisplayName - The name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + DisplayName *string `json:"displayName,omitempty"` + UsesTaskDependencies *bool `json:"usesTaskDependencies,omitempty"` + // OnAllTasksComplete - Note that if a job contains no tasks, then all tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the job properties to set onAllTasksComplete to terminatejob once you have finished adding tasks. The default is noaction. Possible values include: 'NoAction', 'TerminateJob' + OnAllTasksComplete OnAllTasksComplete `json:"onAllTasksComplete,omitempty"` + // OnTaskFailure - The default is noaction. Possible values include: 'OnTaskFailureNoAction', 'OnTaskFailurePerformExitOptionsJobAction' + OnTaskFailure OnTaskFailure `json:"onTaskFailure,omitempty"` + NetworkConfiguration *JobNetworkConfiguration `json:"networkConfiguration,omitempty"` + Constraints *JobConstraints `json:"constraints,omitempty"` + // JobManagerTask - If the job does not specify a Job Manager task, the user must explicitly add tasks to the job using the Task API. If the job does specify a Job Manager task, the Batch service creates the Job Manager task when the job is created, and will try to schedule the Job Manager task before scheduling other tasks in the job. + JobManagerTask *JobManagerTask `json:"jobManagerTask,omitempty"` + // JobPreparationTask - If a job has a Job Preparation task, the Batch service will run the Job Preparation task on a compute node before starting any tasks of that job on that compute node. + JobPreparationTask *JobPreparationTask `json:"jobPreparationTask,omitempty"` + // JobReleaseTask - The primary purpose of the Job Release task is to undo changes to compute nodes made by the Job Preparation task. Example activities include deleting local files, or shutting down services that were started as part of job preparation. A Job Release task cannot be specified without also specifying a Job Preparation task for the job. The Batch service runs the Job Release task on the compute nodes that have run the Job Preparation task. + JobReleaseTask *JobReleaseTask `json:"jobReleaseTask,omitempty"` + // CommonEnvironmentSettings - Individual tasks can override an environment setting specified here by specifying the same setting name with a different value. + CommonEnvironmentSettings *[]EnvironmentSetting `json:"commonEnvironmentSettings,omitempty"` + PoolInfo *PoolInformation `json:"poolInfo,omitempty"` + // Metadata - The Batch service does not assign any meaning to metadata; it is solely for the use of user code. 
+ Metadata *[]MetadataItem `json:"metadata,omitempty"` +} + +// JobStatistics ... +type JobStatistics struct { + autorest.Response `json:"-"` + URL *string `json:"url,omitempty"` + StartTime *date.Time `json:"startTime,omitempty"` + LastUpdateTime *date.Time `json:"lastUpdateTime,omitempty"` + UserCPUTime *string `json:"userCPUTime,omitempty"` + KernelCPUTime *string `json:"kernelCPUTime,omitempty"` + // WallClockTime - The wall clock time is the elapsed time from when the task started running on a compute node to when it finished (or to the last time the statistics were updated, if the task had not finished by then). If a task was retried, this includes the wall clock time of all the task retries. + WallClockTime *string `json:"wallClockTime,omitempty"` + ReadIOps *int64 `json:"readIOps,omitempty"` + WriteIOps *int64 `json:"writeIOps,omitempty"` + ReadIOGiB *float64 `json:"readIOGiB,omitempty"` + WriteIOGiB *float64 `json:"writeIOGiB,omitempty"` + // NumSucceededTasks - A task completes successfully if it returns exit code 0. + NumSucceededTasks *int64 `json:"numSucceededTasks,omitempty"` + // NumFailedTasks - A task fails if it exhausts its maximum retry count without returning exit code 0. + NumFailedTasks *int64 `json:"numFailedTasks,omitempty"` + NumTaskRetries *int64 `json:"numTaskRetries,omitempty"` + // WaitTime - The wait time for a task is defined as the elapsed time between the creation of the task and the start of task execution. (If the task is retried due to failures, the wait time is the time to the most recent task execution.) This value is only reported in the account lifetime statistics; it is not included in the job statistics. + WaitTime *string `json:"waitTime,omitempty"` +} + +// JobTerminateParameter ... +type JobTerminateParameter struct { + TerminateReason *string `json:"terminateReason,omitempty"` +} + +// JobUpdateParameter ... +type JobUpdateParameter struct { + // Priority - Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If omitted, it is set to the default value 0. + Priority *int32 `json:"priority,omitempty"` + // Constraints - If omitted, the constraints are cleared. + Constraints *JobConstraints `json:"constraints,omitempty"` + // PoolInfo - You may change the pool for a job only when the job is disabled. The Update Job call will fail if you include the poolInfo element and the job is not disabled. If you specify an autoPoolSpecification specification in the poolInfo, only the keepAlive property can be updated, and then only if the auto pool has a poolLifetimeOption of job. + PoolInfo *PoolInformation `json:"poolInfo,omitempty"` + // Metadata - If omitted, it takes the default value of an empty list; in effect, any existing metadata is deleted. + Metadata *[]MetadataItem `json:"metadata,omitempty"` + // OnAllTasksComplete - If omitted, the completion behavior is set to noaction. If the current value is terminatejob, this is an error because a job's completion behavior may not be changed from terminatejob to noaction. You may not change the value from terminatejob to noaction - that is, once you have engaged automatic job termination, you cannot turn it off again. If you try to do this, the request fails and Batch returns status code 400 (Bad Request) and an 'invalid property value' error response. If you do not specify this element in a PUT request, it is equivalent to passing noaction. This is an error if the current value is terminatejob. 
Possible values include: 'NoAction', 'TerminateJob' + OnAllTasksComplete OnAllTasksComplete `json:"onAllTasksComplete,omitempty"` +} + +// LinuxUserConfiguration ... +type LinuxUserConfiguration struct { + // UID - The uid and gid properties must be specified together or not at all. If not specified the underlying operating system picks the uid. + UID *int32 `json:"uid,omitempty"` + // Gid - The uid and gid properties must be specified together or not at all. If not specified the underlying operating system picks the gid. + Gid *int32 `json:"gid,omitempty"` + // SSHPrivateKey - The private key must not be password protected. The private key is used to automatically configure asymmetric-key based authentication for SSH between nodes in a Linux pool when the pool's enableInterNodeCommunication property is true (it is ignored if enableInterNodeCommunication is false). It does this by placing the key pair into the user's .ssh directory. If not specified, password-less SSH is not configured between nodes (no modification of the user's .ssh directory is done). + SSHPrivateKey *string `json:"sshPrivateKey,omitempty"` +} + +// MetadataItem the Batch service does not assign any meaning to this metadata; it is solely for the use of +// user code. +type MetadataItem struct { + Name *string `json:"name,omitempty"` + Value *string `json:"value,omitempty"` +} + +// MultiInstanceSettings multi-instance tasks are commonly used to support MPI tasks. In the MPI case, if +// any of the subtasks fail (for example due to exiting with a non-zero exit code) the entire +// multi-instance task fails. The multi-instance task is then terminated and retried, up to its retry +// limit. +type MultiInstanceSettings struct { + // NumberOfInstances - If omitted, the default is 1. + NumberOfInstances *int32 `json:"numberOfInstances,omitempty"` + // CoordinationCommandLine - A typical coordination command line launches a background service and verifies that the service is ready to process inter-node messages. + CoordinationCommandLine *string `json:"coordinationCommandLine,omitempty"` + // CommonResourceFiles - The difference between common resource files and task resource files is that common resource files are downloaded for all subtasks including the primary, whereas task resource files are downloaded only for the primary. Also note that these resource files are not downloaded to the task working directory, but instead are downloaded to the task root directory (one directory above the working directory). There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. + CommonResourceFiles *[]ResourceFile `json:"commonResourceFiles,omitempty"` +} + +// NameValuePair ... +type NameValuePair struct { + Name *string `json:"name,omitempty"` + Value *string `json:"value,omitempty"` +} + +// NetworkConfiguration the network configuration for a pool. +type NetworkConfiguration struct { + // SubnetID - The virtual network must be in the same region and subscription as the Azure Batch account. The specified subnet should have enough free IP addresses to accommodate the number of nodes in the pool. If the subnet doesn't have enough free IP addresses, the pool will partially allocate compute nodes, and a resize error will occur. 
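// For an MPI-style task, the coordination command line described above typically starts the background
// service that lets instances exchange messages before the primary task runs. A minimal sketch under
// the same assumed imports; the coordination script name is hypothetical.
package sketch

import (
	"github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
	"github.com/Azure/go-autorest/autorest/to"
)

func mpiSettings() batch.MultiInstanceSettings {
	return batch.MultiInstanceSettings{
		NumberOfInstances:       to.Int32Ptr(4), // the default when omitted is 1
		CoordinationCommandLine: to.StringPtr(`/bin/sh -c "./start-coordination-service.sh"`), // hypothetical script
	}
}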
The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule tasks on the compute nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the compute nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the compute nodes to unusable. For pools created with virtualMachineConfiguration only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported, but for pools created with cloudServiceConfiguration both ARM and classic virtual networks are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication. For pools created with a virtual machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. For pools created with a cloud service configuration, enable ports 10100, 20100, and 30100. Also enable outbound connections to Azure Storage on port 443. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration + SubnetID *string `json:"subnetId,omitempty"` + // DynamicVNetAssignmentScope - Possible values include: 'DynamicVNetAssignmentScopeNone', 'DynamicVNetAssignmentScopeJob' + DynamicVNetAssignmentScope DynamicVNetAssignmentScope `json:"dynamicVNetAssignmentScope,omitempty"` + // EndpointConfiguration - Pool endpoint configuration is only supported on pools with the virtualMachineConfiguration property. + EndpointConfiguration *PoolEndpointConfiguration `json:"endpointConfiguration,omitempty"` +} + +// NetworkSecurityGroupRule ... +type NetworkSecurityGroupRule struct { + // Priority - Priorities within a pool must be unique and are evaluated in order of priority. The lower the number the higher the priority. For example, rules could be specified with order numbers of 150, 250, and 350. The rule with the order number of 150 takes precedence over the rule that has an order of 250. Allowed priorities are 150 to 3500. If any reserved or duplicate values are provided the request fails with HTTP status code 400. + Priority *int32 `json:"priority,omitempty"` + // Access - Possible values include: 'Allow', 'Deny' + Access NetworkSecurityGroupRuleAccess `json:"access,omitempty"` + // SourceAddressPrefix - Valid values are a single IP address (i.e. 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, or * (for all addresses). If any other values are provided the request fails with HTTP status code 400. + SourceAddressPrefix *string `json:"sourceAddressPrefix,omitempty"` + // SourcePortRanges - Valid values are '*' (for all ports 0 - 65535), a specific port (i.e. 22), or a port range (i.e. 100-200). The ports must be in the range of 0 to 65535. Each entry in this collection must not overlap any other entry (either a range or an individual port). If any other values are provided the request fails with HTTP status code 400. The default value is '*'. + SourcePortRanges *[]string `json:"sourcePortRanges,omitempty"` +} + +// NodeAgentInformation the Batch node agent is a program that runs on each node in the pool and provides +// Batch capability on the compute node. 
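// Rule priorities above run from 150 (evaluated first) to 3500, and at most 25 rules may be spread
// across all endpoints in a pool. A sketch of an allow-then-deny pair that could populate the
// networkSecurityGroupRules of an inbound NAT pool; same assumed imports, constant names as listed in
// the "Possible values" note, and an illustrative source address range.
package sketch

import (
	"github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
	"github.com/Azure/go-autorest/autorest/to"
)

func natPoolRules() []batch.NetworkSecurityGroupRule {
	return []batch.NetworkSecurityGroupRule{
		{
			Priority:            to.Int32Ptr(150), // lower number = higher priority
			Access:              batch.Allow,
			SourceAddressPrefix: to.StringPtr("10.0.0.0/8"),
		},
		{
			Priority:            to.Int32Ptr(3500),
			Access:              batch.Deny,
			SourceAddressPrefix: to.StringPtr("*"), // everything not matched by the rule above
		},
	}
}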
+type NodeAgentInformation struct { + // Version - This version number can be checked against the node agent release notes located at https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. + Version *string `json:"version,omitempty"` + // LastUpdateTime - This is the most recent time that the node agent was updated to a new version. + LastUpdateTime *date.Time `json:"lastUpdateTime,omitempty"` +} + +// NodeCounts ... +type NodeCounts struct { + Creating *int32 `json:"creating,omitempty"` + Idle *int32 `json:"idle,omitempty"` + Offline *int32 `json:"offline,omitempty"` + Preempted *int32 `json:"preempted,omitempty"` + Rebooting *int32 `json:"rebooting,omitempty"` + Reimaging *int32 `json:"reimaging,omitempty"` + Running *int32 `json:"running,omitempty"` + Starting *int32 `json:"starting,omitempty"` + StartTaskFailed *int32 `json:"startTaskFailed,omitempty"` + LeavingPool *int32 `json:"leavingPool,omitempty"` + Unknown *int32 `json:"unknown,omitempty"` + Unusable *int32 `json:"unusable,omitempty"` + WaitingForStartTask *int32 `json:"waitingForStartTask,omitempty"` + Total *int32 `json:"total,omitempty"` +} + +// NodeDisableSchedulingParameter ... +type NodeDisableSchedulingParameter struct { + // NodeDisableSchedulingOption - The default value is requeue. Possible values include: 'DisableComputeNodeSchedulingOptionRequeue', 'DisableComputeNodeSchedulingOptionTerminate', 'DisableComputeNodeSchedulingOptionTaskCompletion' + NodeDisableSchedulingOption DisableComputeNodeSchedulingOption `json:"nodeDisableSchedulingOption,omitempty"` +} + +// NodeFile ... +type NodeFile struct { + Name *string `json:"name,omitempty"` + URL *string `json:"url,omitempty"` + IsDirectory *bool `json:"isDirectory,omitempty"` + Properties *FileProperties `json:"properties,omitempty"` +} + +// NodeFileListResult ... +type NodeFileListResult struct { + autorest.Response `json:"-"` + Value *[]NodeFile `json:"value,omitempty"` + OdataNextLink *string `json:"odata.nextLink,omitempty"` +} + +// NodeFileListResultIterator provides access to a complete listing of NodeFile values. +type NodeFileListResultIterator struct { + i int + page NodeFileListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *NodeFileListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/NodeFileListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *NodeFileListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter NodeFileListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. 
+func (iter NodeFileListResultIterator) Response() NodeFileListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter NodeFileListResultIterator) Value() NodeFile { + if !iter.page.NotDone() { + return NodeFile{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the NodeFileListResultIterator type. +func NewNodeFileListResultIterator(page NodeFileListResultPage) NodeFileListResultIterator { + return NodeFileListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (nflr NodeFileListResult) IsEmpty() bool { + return nflr.Value == nil || len(*nflr.Value) == 0 +} + +// nodeFileListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (nflr NodeFileListResult) nodeFileListResultPreparer(ctx context.Context) (*http.Request, error) { + if nflr.OdataNextLink == nil || len(to.String(nflr.OdataNextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(nflr.OdataNextLink))) +} + +// NodeFileListResultPage contains a page of NodeFile values. +type NodeFileListResultPage struct { + fn func(context.Context, NodeFileListResult) (NodeFileListResult, error) + nflr NodeFileListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *NodeFileListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/NodeFileListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.nflr) + if err != nil { + return err + } + page.nflr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *NodeFileListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page NodeFileListResultPage) NotDone() bool { + return !page.nflr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page NodeFileListResultPage) Response() NodeFileListResult { + return page.nflr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page NodeFileListResultPage) Values() []NodeFile { + if page.nflr.IsEmpty() { + return nil + } + return *page.nflr.Value +} + +// Creates a new instance of the NodeFileListResultPage type. +func NewNodeFileListResultPage(getNextPage func(context.Context, NodeFileListResult) (NodeFileListResult, error)) NodeFileListResultPage { + return NodeFileListResultPage{fn: getNextPage} +} + +// NodeRebootParameter ... +type NodeRebootParameter struct { + // NodeRebootOption - The default value is requeue. 
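// The NotDone/Next/Value trio defined above is the standard autorest paging pattern. A caller that
// already holds a NodeFileListResultIterator (for example from one of the file listing operations in
// file.go) can walk every file across pages like this:
package sketch

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
)

// printNodeFiles visits every NodeFile in the listing, following odata.nextLink
// transparently via NextWithContext.
func printNodeFiles(ctx context.Context, iter batch.NodeFileListResultIterator) error {
	for iter.NotDone() {
		f := iter.Value()
		if f.Name != nil {
			fmt.Println(*f.Name)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}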
Possible values include: 'ComputeNodeRebootOptionRequeue', 'ComputeNodeRebootOptionTerminate', 'ComputeNodeRebootOptionTaskCompletion', 'ComputeNodeRebootOptionRetainedData' + NodeRebootOption ComputeNodeRebootOption `json:"nodeRebootOption,omitempty"` +} + +// NodeReimageParameter ... +type NodeReimageParameter struct { + // NodeReimageOption - The default value is requeue. Possible values include: 'ComputeNodeReimageOptionRequeue', 'ComputeNodeReimageOptionTerminate', 'ComputeNodeReimageOptionTaskCompletion', 'ComputeNodeReimageOptionRetainedData' + NodeReimageOption ComputeNodeReimageOption `json:"nodeReimageOption,omitempty"` +} + +// NodeRemoveParameter ... +type NodeRemoveParameter struct { + NodeList *[]string `json:"nodeList,omitempty"` + // ResizeTimeout - The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + ResizeTimeout *string `json:"resizeTimeout,omitempty"` + // NodeDeallocationOption - The default value is requeue. Possible values include: 'Requeue', 'Terminate', 'TaskCompletion', 'RetainedData' + NodeDeallocationOption ComputeNodeDeallocationOption `json:"nodeDeallocationOption,omitempty"` +} + +// NodeUpdateUserParameter ... +type NodeUpdateUserParameter struct { + // Password - The password is required for Windows nodes (those created with 'cloudServiceConfiguration', or created with 'virtualMachineConfiguration' using a Windows image reference). For Linux compute nodes, the password can optionally be specified along with the sshPublicKey property. If omitted, any existing password is removed. + Password *string `json:"password,omitempty"` + // ExpiryTime - If omitted, the default is 1 day from the current time. For Linux compute nodes, the expiryTime has a precision up to a day. + ExpiryTime *date.Time `json:"expiryTime,omitempty"` + // SSHPublicKey - The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux nodes. If this is specified for a Windows node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If omitted, any existing SSH public key is removed. + SSHPublicKey *string `json:"sshPublicKey,omitempty"` +} + +// OutputFile ... +type OutputFile struct { + // FilePattern - Both relative and absolute paths are supported. Relative paths are relative to the task working directory. The following wildcards are supported: * matches 0 or more characters (for example pattern abc* would match abc or abcdef), ** matches any directory, ? matches any single character, [abc] matches one character in the brackets, and [a-c] matches one character in the range. Brackets can include a negation to match any character not specified (for example [!abc] matches any character but a, b, or c). If a file name starts with "." it is ignored by default but may be matched by specifying it explicitly (for example *.gif will not match .a.gif, but .*.gif will). A simple example: **\*.txt matches any file that does not start in '.' and ends with .txt in the task working directory or any subdirectory. If the filename contains a wildcard character it can be escaped using brackets (for example abc[*] would match a file named abc*). Note that both \ and / are treated as directory separators on Windows, but only / is on Linux. 
Environment variables (%var% on Windows or $var on Linux) are expanded prior to the pattern being applied. + FilePattern *string `json:"filePattern,omitempty"` + Destination *OutputFileDestination `json:"destination,omitempty"` + UploadOptions *OutputFileUploadOptions `json:"uploadOptions,omitempty"` +} + +// OutputFileBlobContainerDestination ... +type OutputFileBlobContainerDestination struct { + // Path - If filePattern refers to a specific file (i.e. contains no wildcards), then path is the name of the blob to which to upload that file. If filePattern contains one or more wildcards (and therefore may match multiple files), then path is the name of the blob virtual directory (which is prepended to each blob name) to which to upload the file(s). If omitted, file(s) are uploaded to the root of the container with a blob name matching their file name. + Path *string `json:"path,omitempty"` + // ContainerURL - The URL must include a Shared Access Signature (SAS) granting write permissions to the container. + ContainerURL *string `json:"containerUrl,omitempty"` +} + +// OutputFileDestination ... +type OutputFileDestination struct { + Container *OutputFileBlobContainerDestination `json:"container,omitempty"` +} + +// OutputFileUploadOptions ... +type OutputFileUploadOptions struct { + // UploadCondition - The default is taskcompletion. Possible values include: 'OutputFileUploadConditionTaskSuccess', 'OutputFileUploadConditionTaskFailure', 'OutputFileUploadConditionTaskCompletion' + UploadCondition OutputFileUploadCondition `json:"uploadCondition,omitempty"` +} + +// PoolAddParameter ... +type PoolAddParameter struct { + // ID - The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two pool IDs within an account that differ only by case). + ID *string `json:"id,omitempty"` + // DisplayName - The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + DisplayName *string `json:"displayName,omitempty"` + // VMSize - For information about available sizes of virtual machines for Cloud Services pools (pools created with cloudServiceConfiguration), see Sizes for Cloud Services (https://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/). Batch supports all Cloud Services VM sizes except ExtraSmall, A1V2 and A2V2. For information about available VM sizes for pools using images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration) see Sizes for Virtual Machines (Linux) (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for Virtual Machines (Windows) (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). + VMSize *string `json:"vmSize,omitempty"` + // CloudServiceConfiguration - This property and virtualMachineConfiguration are mutually exclusive and one of the properties must be specified. This property cannot be specified if the Batch account was created with its poolAllocationMode property set to 'UserSubscription'. 
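+ // Editorial usage sketch (not generated code): constructing an OutputFile, as described for the
+ // OutputFile type above, typically combines a filePattern, a blob container destination and an
+ // upload condition. The container URL and file pattern below are illustrative assumptions; the
+ // field and constant names come from this file:
+ //
+ //   uploadStdout := OutputFile{
+ //       FilePattern: to.StringPtr("../std*.txt"),
+ //       Destination: &OutputFileDestination{
+ //           Container: &OutputFileBlobContainerDestination{
+ //               ContainerURL: to.StringPtr("https://<account>.blob.core.windows.net/output?<sas-token>"),
+ //           },
+ //       },
+ //       UploadOptions: &OutputFileUploadOptions{
+ //           UploadCondition: OutputFileUploadConditionTaskCompletion,
+ //       },
+ //   }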
+ CloudServiceConfiguration *CloudServiceConfiguration `json:"cloudServiceConfiguration,omitempty"`
+ // VirtualMachineConfiguration - This property and cloudServiceConfiguration are mutually exclusive and one of the properties must be specified.
+ VirtualMachineConfiguration *VirtualMachineConfiguration `json:"virtualMachineConfiguration,omitempty"`
+ // ResizeTimeout - This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).
+ ResizeTimeout *string `json:"resizeTimeout,omitempty"`
+ // TargetDedicatedNodes - This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both.
+ TargetDedicatedNodes *int32 `json:"targetDedicatedNodes,omitempty"`
+ // TargetLowPriorityNodes - This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both.
+ TargetLowPriorityNodes *int32 `json:"targetLowPriorityNodes,omitempty"`
+ // EnableAutoScale - If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula property is required and the pool automatically resizes according to the formula. The default value is false.
+ EnableAutoScale *bool `json:"enableAutoScale,omitempty"`
+ // AutoScaleFormula - This property must not be specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. The formula is checked for validity before the pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see 'Automatically scale compute nodes in an Azure Batch pool' (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/).
+ AutoScaleFormula *string `json:"autoScaleFormula,omitempty"`
+ // AutoScaleEvaluationInterval - The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).
+ AutoScaleEvaluationInterval *string `json:"autoScaleEvaluationInterval,omitempty"`
+ // EnableInterNodeCommunication - Enabling inter-node communication limits the maximum size of the pool due to deployment restrictions on the nodes of the pool. This may result in the pool not reaching its desired size. The default value is false.
+ EnableInterNodeCommunication *bool `json:"enableInterNodeCommunication,omitempty"`
+ NetworkConfiguration *NetworkConfiguration `json:"networkConfiguration,omitempty"`
+ // StartTask - The task runs when the node is added to the pool or when the node is restarted.
+ StartTask *StartTask `json:"startTask,omitempty"`
+ // CertificateReferences - For Windows compute nodes, the Batch service installs the certificates to the specified certificate store and location.
For Linux compute nodes, the certificates are stored in a directory inside the task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this location. For certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and certificates are placed in that directory. + CertificateReferences *[]CertificateReference `json:"certificateReferences,omitempty"` + // ApplicationPackageReferences - Changes to application package references affect all new compute nodes joining the pool, but do not affect compute nodes that are already in the pool until they are rebooted or reimaged. There is a maximum of 10 application package references on any given pool. + ApplicationPackageReferences *[]ApplicationPackageReference `json:"applicationPackageReferences,omitempty"` + // ApplicationLicenses - The list of application licenses must be a subset of available Batch service application licenses. If a license is requested which is not supported, pool creation will fail. + ApplicationLicenses *[]string `json:"applicationLicenses,omitempty"` + // MaxTasksPerNode - The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. + MaxTasksPerNode *int32 `json:"maxTasksPerNode,omitempty"` + TaskSchedulingPolicy *TaskSchedulingPolicy `json:"taskSchedulingPolicy,omitempty"` + UserAccounts *[]UserAccount `json:"userAccounts,omitempty"` + // Metadata - The Batch service does not assign any meaning to metadata; it is solely for the use of user code. + Metadata *[]MetadataItem `json:"metadata,omitempty"` +} + +// PoolEnableAutoScaleParameter ... +type PoolEnableAutoScaleParameter struct { + // AutoScaleFormula - The formula is checked for validity before it is applied to the pool. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see Automatically scale compute nodes in an Azure Batch pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + AutoScaleFormula *string `json:"autoScaleFormula,omitempty"` + // AutoScaleEvaluationInterval - The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If you specify a new interval, then the existing autoscale evaluation schedule will be stopped and a new autoscale evaluation schedule will be started, with its starting time being the time when this request was issued. + AutoScaleEvaluationInterval *string `json:"autoScaleEvaluationInterval,omitempty"` +} + +// PoolEndpointConfiguration ... +type PoolEndpointConfiguration struct { + // InboundNATPools - The maximum number of inbound NAT pools per Batch pool is 5. If the maximum number of inbound NAT pools is exceeded the request fails with HTTP status code 400. + InboundNATPools *[]InboundNATPool `json:"inboundNATPools,omitempty"` +} + +// PoolEvaluateAutoScaleParameter ... +type PoolEvaluateAutoScaleParameter struct { + // AutoScaleFormula - The formula is validated and its results calculated, but it is not applied to the pool. To apply the formula to the pool, 'Enable automatic scaling on a pool'. 
For more information about specifying this formula, see Automatically scale compute nodes in an Azure Batch pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + AutoScaleFormula *string `json:"autoScaleFormula,omitempty"` +} + +// PoolInformation ... +type PoolInformation struct { + // PoolID - You must ensure that the pool referenced by this property exists. If the pool does not exist at the time the Batch service tries to schedule a job, no tasks for the job will run until you create a pool with that id. Note that the Batch service will not reject the job request; it will simply not run tasks until the pool exists. You must specify either the pool ID or the auto pool specification, but not both. + PoolID *string `json:"poolId,omitempty"` + // AutoPoolSpecification - If auto pool creation fails, the Batch service moves the job to a completed state, and the pool creation error is set in the job's scheduling error property. The Batch service manages the lifetime (both creation and, unless keepAlive is specified, deletion) of the auto pool. Any user actions that affect the lifetime of the auto pool while the job is active will result in unexpected behavior. You must specify either the pool ID or the auto pool specification, but not both. + AutoPoolSpecification *AutoPoolSpecification `json:"autoPoolSpecification,omitempty"` +} + +// PoolListUsageMetricsResult ... +type PoolListUsageMetricsResult struct { + autorest.Response `json:"-"` + Value *[]PoolUsageMetrics `json:"value,omitempty"` + OdataNextLink *string `json:"odata.nextLink,omitempty"` +} + +// PoolListUsageMetricsResultIterator provides access to a complete listing of PoolUsageMetrics values. +type PoolListUsageMetricsResultIterator struct { + i int + page PoolListUsageMetricsResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *PoolListUsageMetricsResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PoolListUsageMetricsResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *PoolListUsageMetricsResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter PoolListUsageMetricsResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter PoolListUsageMetricsResultIterator) Response() PoolListUsageMetricsResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. 
+func (iter PoolListUsageMetricsResultIterator) Value() PoolUsageMetrics { + if !iter.page.NotDone() { + return PoolUsageMetrics{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the PoolListUsageMetricsResultIterator type. +func NewPoolListUsageMetricsResultIterator(page PoolListUsageMetricsResultPage) PoolListUsageMetricsResultIterator { + return PoolListUsageMetricsResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (plumr PoolListUsageMetricsResult) IsEmpty() bool { + return plumr.Value == nil || len(*plumr.Value) == 0 +} + +// poolListUsageMetricsResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (plumr PoolListUsageMetricsResult) poolListUsageMetricsResultPreparer(ctx context.Context) (*http.Request, error) { + if plumr.OdataNextLink == nil || len(to.String(plumr.OdataNextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(plumr.OdataNextLink))) +} + +// PoolListUsageMetricsResultPage contains a page of PoolUsageMetrics values. +type PoolListUsageMetricsResultPage struct { + fn func(context.Context, PoolListUsageMetricsResult) (PoolListUsageMetricsResult, error) + plumr PoolListUsageMetricsResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *PoolListUsageMetricsResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PoolListUsageMetricsResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.plumr) + if err != nil { + return err + } + page.plumr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *PoolListUsageMetricsResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page PoolListUsageMetricsResultPage) NotDone() bool { + return !page.plumr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page PoolListUsageMetricsResultPage) Response() PoolListUsageMetricsResult { + return page.plumr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page PoolListUsageMetricsResultPage) Values() []PoolUsageMetrics { + if page.plumr.IsEmpty() { + return nil + } + return *page.plumr.Value +} + +// Creates a new instance of the PoolListUsageMetricsResultPage type. +func NewPoolListUsageMetricsResultPage(getNextPage func(context.Context, PoolListUsageMetricsResult) (PoolListUsageMetricsResult, error)) PoolListUsageMetricsResultPage { + return PoolListUsageMetricsResultPage{fn: getNextPage} +} + +// PoolNodeCounts ... +type PoolNodeCounts struct { + PoolID *string `json:"poolId,omitempty"` + Dedicated *NodeCounts `json:"dedicated,omitempty"` + LowPriority *NodeCounts `json:"lowPriority,omitempty"` +} + +// PoolNodeCountsListResult ... 
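+// Editorial usage sketch (not generated code): the ListResult, Iterator and Page types in this file
+// follow the standard autorest paging pattern. A caller normally obtains the first page from a client
+// list operation; getNextPage below is an assumed placeholder for the function the generated client
+// supplies to NewPoolNodeCountsListResultPage:
+//
+//   page := NewPoolNodeCountsListResultPage(getNextPage)
+//   if err := page.NextWithContext(ctx); err != nil {
+//       return err
+//   }
+//   for page.NotDone() {
+//       for _, counts := range page.Values() {
+//           fmt.Println(*counts.PoolID)
+//       }
+//       if err := page.NextWithContext(ctx); err != nil {
+//           return err
+//       }
+//   }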
+type PoolNodeCountsListResult struct { + autorest.Response `json:"-"` + // Value - A list of node counts by pool. + Value *[]PoolNodeCounts `json:"value,omitempty"` + OdataNextLink *string `json:"odata.nextLink,omitempty"` +} + +// PoolNodeCountsListResultIterator provides access to a complete listing of PoolNodeCounts values. +type PoolNodeCountsListResultIterator struct { + i int + page PoolNodeCountsListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *PoolNodeCountsListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PoolNodeCountsListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *PoolNodeCountsListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter PoolNodeCountsListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter PoolNodeCountsListResultIterator) Response() PoolNodeCountsListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter PoolNodeCountsListResultIterator) Value() PoolNodeCounts { + if !iter.page.NotDone() { + return PoolNodeCounts{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the PoolNodeCountsListResultIterator type. +func NewPoolNodeCountsListResultIterator(page PoolNodeCountsListResultPage) PoolNodeCountsListResultIterator { + return PoolNodeCountsListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (pnclr PoolNodeCountsListResult) IsEmpty() bool { + return pnclr.Value == nil || len(*pnclr.Value) == 0 +} + +// poolNodeCountsListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (pnclr PoolNodeCountsListResult) poolNodeCountsListResultPreparer(ctx context.Context) (*http.Request, error) { + if pnclr.OdataNextLink == nil || len(to.String(pnclr.OdataNextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(pnclr.OdataNextLink))) +} + +// PoolNodeCountsListResultPage contains a page of PoolNodeCounts values. +type PoolNodeCountsListResultPage struct { + fn func(context.Context, PoolNodeCountsListResult) (PoolNodeCountsListResult, error) + pnclr PoolNodeCountsListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *PoolNodeCountsListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PoolNodeCountsListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.pnclr) + if err != nil { + return err + } + page.pnclr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *PoolNodeCountsListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page PoolNodeCountsListResultPage) NotDone() bool { + return !page.pnclr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page PoolNodeCountsListResultPage) Response() PoolNodeCountsListResult { + return page.pnclr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page PoolNodeCountsListResultPage) Values() []PoolNodeCounts { + if page.pnclr.IsEmpty() { + return nil + } + return *page.pnclr.Value +} + +// Creates a new instance of the PoolNodeCountsListResultPage type. +func NewPoolNodeCountsListResultPage(getNextPage func(context.Context, PoolNodeCountsListResult) (PoolNodeCountsListResult, error)) PoolNodeCountsListResultPage { + return PoolNodeCountsListResultPage{fn: getNextPage} +} + +// PoolPatchParameter ... +type PoolPatchParameter struct { + // StartTask - If this element is present, it overwrites any existing start task. If omitted, any existing start task is left unchanged. + StartTask *StartTask `json:"startTask,omitempty"` + // CertificateReferences - If this element is present, it replaces any existing certificate references configured on the pool. If omitted, any existing certificate references are left unchanged. For Windows compute nodes, the Batch service installs the certificates to the specified certificate store and location. For Linux compute nodes, the certificates are stored in a directory inside the task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this location. For certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and certificates are placed in that directory. + CertificateReferences *[]CertificateReference `json:"certificateReferences,omitempty"` + // ApplicationPackageReferences - Changes to application package references affect all new compute nodes joining the pool, but do not affect compute nodes that are already in the pool until they are rebooted or reimaged. If this element is present, it replaces any existing application package references. If you specify an empty collection, then all application package references are removed from the pool. If omitted, any existing application package references are left unchanged. + ApplicationPackageReferences *[]ApplicationPackageReference `json:"applicationPackageReferences,omitempty"` + // Metadata - If this element is present, it replaces any existing metadata configured on the pool. If you specify an empty collection, any metadata is removed from the pool. 
If omitted, any existing metadata is left unchanged. + Metadata *[]MetadataItem `json:"metadata,omitempty"` +} + +// PoolResizeParameter ... +type PoolResizeParameter struct { + TargetDedicatedNodes *int32 `json:"targetDedicatedNodes,omitempty"` + TargetLowPriorityNodes *int32 `json:"targetLowPriorityNodes,omitempty"` + // ResizeTimeout - The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + ResizeTimeout *string `json:"resizeTimeout,omitempty"` + // NodeDeallocationOption - The default value is requeue. Possible values include: 'Requeue', 'Terminate', 'TaskCompletion', 'RetainedData' + NodeDeallocationOption ComputeNodeDeallocationOption `json:"nodeDeallocationOption,omitempty"` +} + +// PoolSpecification ... +type PoolSpecification struct { + // DisplayName - The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + DisplayName *string `json:"displayName,omitempty"` + // VMSize - For information about available sizes of virtual machines in pools, see Choose a VM size for compute nodes in an Azure Batch pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + VMSize *string `json:"vmSize,omitempty"` + // CloudServiceConfiguration - This property must be specified if the pool needs to be created with Azure PaaS VMs. This property and virtualMachineConfiguration are mutually exclusive and one of the properties must be specified. If neither is specified then the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). This property cannot be specified if the Batch account was created with its poolAllocationMode property set to 'UserSubscription'. + CloudServiceConfiguration *CloudServiceConfiguration `json:"cloudServiceConfiguration,omitempty"` + // VirtualMachineConfiguration - This property must be specified if the pool needs to be created with Azure IaaS VMs. This property and cloudServiceConfiguration are mutually exclusive and one of the properties must be specified. If neither is specified then the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + VirtualMachineConfiguration *VirtualMachineConfiguration `json:"virtualMachineConfiguration,omitempty"` + // MaxTasksPerNode - The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. + MaxTasksPerNode *int32 `json:"maxTasksPerNode,omitempty"` + TaskSchedulingPolicy *TaskSchedulingPolicy `json:"taskSchedulingPolicy,omitempty"` + // ResizeTimeout - This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service rejects the request with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + ResizeTimeout *string `json:"resizeTimeout,omitempty"` + // TargetDedicatedNodes - This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. 
+ TargetDedicatedNodes *int32 `json:"targetDedicatedNodes,omitempty"`
+ // TargetLowPriorityNodes - This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both.
+ TargetLowPriorityNodes *int32 `json:"targetLowPriorityNodes,omitempty"`
+ // EnableAutoScale - If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula element is required. The pool automatically resizes according to the formula. The default value is false.
+ EnableAutoScale *bool `json:"enableAutoScale,omitempty"`
+ // AutoScaleFormula - This property must not be specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. The formula is checked for validity before the pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information.
+ AutoScaleFormula *string `json:"autoScaleFormula,omitempty"`
+ // AutoScaleEvaluationInterval - The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).
+ AutoScaleEvaluationInterval *string `json:"autoScaleEvaluationInterval,omitempty"`
+ // EnableInterNodeCommunication - Enabling inter-node communication limits the maximum size of the pool due to deployment restrictions on the nodes of the pool. This may result in the pool not reaching its desired size. The default value is false.
+ EnableInterNodeCommunication *bool `json:"enableInterNodeCommunication,omitempty"`
+ NetworkConfiguration *NetworkConfiguration `json:"networkConfiguration,omitempty"`
+ StartTask *StartTask `json:"startTask,omitempty"`
+ // CertificateReferences - For Windows compute nodes, the Batch service installs the certificates to the specified certificate store and location. For Linux compute nodes, the certificates are stored in a directory inside the task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this location. For certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and certificates are placed in that directory.
+ CertificateReferences *[]CertificateReference `json:"certificateReferences,omitempty"`
+ // ApplicationPackageReferences - Changes to application package references affect all new compute nodes joining the pool, but do not affect compute nodes that are already in the pool until they are rebooted or reimaged. There is a maximum of 10 application package references on any given pool.
+ ApplicationPackageReferences *[]ApplicationPackageReference `json:"applicationPackageReferences,omitempty"`
+ // ApplicationLicenses - The list of application licenses must be a subset of available Batch service application licenses. If a license is requested which is not supported, pool creation will fail. The permitted licenses available on the pool are 'maya', 'vray', '3dsmax', 'arnold'. An additional charge applies for each application license added to the pool.
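+ // Editorial usage sketch (not generated code): enabling autoscale on a PoolSpecification ties
+ // enableAutoScale, autoScaleFormula and autoScaleEvaluationInterval together as described above.
+ // The VM size, formula and ISO 8601 interval below are illustrative assumptions only; see the
+ // linked autoscaling article for the formula language:
+ //
+ //   spec := PoolSpecification{
+ //       VMSize:                      to.StringPtr("standard_d2_v3"),
+ //       EnableAutoScale:             to.BoolPtr(true),
+ //       AutoScaleFormula:            to.StringPtr("$TargetDedicatedNodes = 2;"),
+ //       AutoScaleEvaluationInterval: to.StringPtr("PT15M"),
+ //   }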
+ ApplicationLicenses *[]string `json:"applicationLicenses,omitempty"`
+ UserAccounts *[]UserAccount `json:"userAccounts,omitempty"`
+ // Metadata - The Batch service does not assign any meaning to metadata; it is solely for the use of user code.
+ Metadata *[]MetadataItem `json:"metadata,omitempty"`
+}
+
+// PoolStatistics ...
+type PoolStatistics struct {
+ autorest.Response `json:"-"`
+ URL *string `json:"url,omitempty"`
+ StartTime *date.Time `json:"startTime,omitempty"`
+ LastUpdateTime *date.Time `json:"lastUpdateTime,omitempty"`
+ UsageStats *UsageStatistics `json:"usageStats,omitempty"`
+ ResourceStats *ResourceStatistics `json:"resourceStats,omitempty"`
+}
+
+// PoolUpdatePropertiesParameter ...
+type PoolUpdatePropertiesParameter struct {
+ // StartTask - If this element is present, it overwrites any existing start task. If omitted, any existing start task is removed from the pool.
+ StartTask *StartTask `json:"startTask,omitempty"`
+ // CertificateReferences - This list replaces any existing certificate references configured on the pool. If you specify an empty collection, any existing certificate references are removed from the pool. For Windows compute nodes, the Batch service installs the certificates to the specified certificate store and location. For Linux compute nodes, the certificates are stored in a directory inside the task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this location. For certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and certificates are placed in that directory.
+ CertificateReferences *[]CertificateReference `json:"certificateReferences,omitempty"`
+ // ApplicationPackageReferences - The list replaces any existing application package references on the pool. Changes to application package references affect all new compute nodes joining the pool, but do not affect compute nodes that are already in the pool until they are rebooted or reimaged. There is a maximum of 10 application package references on any given pool. If omitted, or if you specify an empty collection, any existing application package references are removed from the pool. A maximum of 10 references may be specified on a given pool.
+ ApplicationPackageReferences *[]ApplicationPackageReference `json:"applicationPackageReferences,omitempty"`
+ // Metadata - This list replaces any existing metadata configured on the pool. If omitted, or if you specify an empty collection, any existing metadata is removed from the pool.
+ Metadata *[]MetadataItem `json:"metadata,omitempty"`
+}
+
+// PoolUsageMetrics ...
+type PoolUsageMetrics struct {
+ PoolID *string `json:"poolId,omitempty"`
+ StartTime *date.Time `json:"startTime,omitempty"`
+ EndTime *date.Time `json:"endTime,omitempty"`
+ // VMSize - For information about available sizes of virtual machines in pools, see Choose a VM size for compute nodes in an Azure Batch pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).
+ VMSize *string `json:"vmSize,omitempty"`
+ TotalCoreHours *float64 `json:"totalCoreHours,omitempty"`
+}
+
+// ReadCloser ...
+type ReadCloser struct {
+ autorest.Response `json:"-"`
+ Value *io.ReadCloser `json:"value,omitempty"`
+}
+
+// RecentJob ...
+type RecentJob struct {
+ ID *string `json:"id,omitempty"`
+ URL *string `json:"url,omitempty"`
+}
+
+// ResizeError ...
+type ResizeError struct { + Code *string `json:"code,omitempty"` + Message *string `json:"message,omitempty"` + Values *[]NameValuePair `json:"values,omitempty"` +} + +// ResourceFile ... +type ResourceFile struct { + // AutoStorageContainerName - The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. + AutoStorageContainerName *string `json:"autoStorageContainerName,omitempty"` + // StorageContainerURL - The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. This URL must be readable and listable using anonymous access; that is, the Batch service does not present any credentials when downloading blobs from the container. There are two ways to get such a URL for a container in Azure storage: include a Shared Access Signature (SAS) granting read and list permissions on the container, or set the ACL for the container to allow public access. + StorageContainerURL *string `json:"storageContainerUrl,omitempty"` + // HTTPURL - The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. If the URL points to Azure Blob Storage, it must be readable using anonymous access; that is, the Batch service does not present any credentials when downloading the blob. There are two ways to get such a URL for a blob in Azure storage: include a Shared Access Signature (SAS) granting read permissions on the blob, or set the ACL for the blob or its container to allow public access. + HTTPURL *string `json:"httpUrl,omitempty"` + // BlobPrefix - The property is valid only when autoStorageContainerName or storageContainerUrl is used. This prefix can be a partial filename or a subdirectory. If a prefix is not specified, all the files in the container will be downloaded. + BlobPrefix *string `json:"blobPrefix,omitempty"` + // FilePath - If the httpUrl property is specified, the filePath is required and describes the path which the file will be downloaded to, including the filename. Otherwise, if the autoStorageContainerName or storageContainerUrl property is specified, filePath is optional and is the directory to download the files to. In the case where filePath is used as a directory, any directory structure already associated with the input data will be retained in full and appended to the specified filePath directory. The specified relative path cannot break out of the task's working directory (for example by using '..'). + FilePath *string `json:"filePath,omitempty"` + // FileMode - This property applies only to files being downloaded to Linux compute nodes. It will be ignored if it is specified for a resourceFile which will be downloaded to a Windows node. If this property is not specified for a Linux node, then a default value of 0770 is applied to the file. + FileMode *string `json:"fileMode,omitempty"` +} + +// ResourceStatistics ... 
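+// Editorial usage sketch (not generated code): for the ResourceFile type above, exactly one of
+// autoStorageContainerName, storageContainerUrl or httpUrl is set. A single-blob download might look
+// like the following; the URL and file name are illustrative assumptions:
+//
+//   input := ResourceFile{
+//       HTTPURL:  to.StringPtr("https://<account>.blob.core.windows.net/inputs/data.txt?<sas-token>"),
+//       FilePath: to.StringPtr("data.txt"),
+//   }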
+type ResourceStatistics struct { + StartTime *date.Time `json:"startTime,omitempty"` + LastUpdateTime *date.Time `json:"lastUpdateTime,omitempty"` + AvgCPUPercentage *float64 `json:"avgCPUPercentage,omitempty"` + AvgMemoryGiB *float64 `json:"avgMemoryGiB,omitempty"` + PeakMemoryGiB *float64 `json:"peakMemoryGiB,omitempty"` + AvgDiskGiB *float64 `json:"avgDiskGiB,omitempty"` + PeakDiskGiB *float64 `json:"peakDiskGiB,omitempty"` + DiskReadIOps *int64 `json:"diskReadIOps,omitempty"` + DiskWriteIOps *int64 `json:"diskWriteIOps,omitempty"` + DiskReadGiB *float64 `json:"diskReadGiB,omitempty"` + DiskWriteGiB *float64 `json:"diskWriteGiB,omitempty"` + NetworkReadGiB *float64 `json:"networkReadGiB,omitempty"` + NetworkWriteGiB *float64 `json:"networkWriteGiB,omitempty"` +} + +// Schedule ... +type Schedule struct { + // DoNotRunUntil - If you do not specify a doNotRunUntil time, the schedule becomes ready to create jobs immediately. + DoNotRunUntil *date.Time `json:"doNotRunUntil,omitempty"` + // DoNotRunAfter - If you do not specify a doNotRunAfter time, and you are creating a recurring job schedule, the job schedule will remain active until you explicitly terminate it. + DoNotRunAfter *date.Time `json:"doNotRunAfter,omitempty"` + // StartWindow - If a job is not created within the startWindow interval, then the 'opportunity' is lost; no job will be created until the next recurrence of the schedule. If the schedule is recurring, and the startWindow is longer than the recurrence interval, then this is equivalent to an infinite startWindow, because the job that is 'due' in one recurrenceInterval is not carried forward into the next recurrence interval. The default is infinite. The minimum value is 1 minute. If you specify a lower value, the Batch service rejects the schedule with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + StartWindow *string `json:"startWindow,omitempty"` + // RecurrenceInterval - Because a job schedule can have at most one active job under it at any given time, if it is time to create a new job under a job schedule, but the previous job is still running, the Batch service will not create the new job until the previous job finishes. If the previous job does not finish within the startWindow period of the new recurrenceInterval, then no new job will be scheduled for that interval. For recurring jobs, you should normally specify a jobManagerTask in the jobSpecification. If you do not use jobManagerTask, you will need an external process to monitor when jobs are created, add tasks to the jobs and terminate the jobs ready for the next recurrence. The default is that the schedule does not recur: one job is created, within the startWindow after the doNotRunUntil time, and the schedule is complete as soon as that job finishes. The minimum value is 1 minute. If you specify a lower value, the Batch service rejects the schedule with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + RecurrenceInterval *string `json:"recurrenceInterval,omitempty"` +} + +// StartTask batch will retry tasks when a recovery operation is triggered on a compute node. Examples of +// recovery operations include (but are not limited to) when an unhealthy compute node is rebooted or a +// compute node disappeared due to host failure. Retries due to recovery operations are independent of and +// are not counted against the maxTaskRetryCount. 
Even if the maxTaskRetryCount is 0, an internal retry due
+// to a recovery operation may occur. Because of this, all tasks should be idempotent. This means tasks
+// need to tolerate being interrupted and restarted without causing any corruption or duplicate data. The
+// best practice for long running tasks is to use some form of checkpointing. In some cases the start task
+// may be re-run even though the node was not rebooted. Special care should be taken to avoid start tasks
+// which create breakaway processes or install/launch services from the start task working directory, as this
+// will block Batch from being able to re-run the start task.
+type StartTask struct {
+ // CommandLine - The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).
+ CommandLine *string `json:"commandLine,omitempty"`
+ // ContainerSettings - When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all task environment variables are mapped into the container, and the task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files.
+ ContainerSettings *TaskContainerSettings `json:"containerSettings,omitempty"`
+ // ResourceFiles - Files listed under this element are located in the task's working directory.
+ ResourceFiles *[]ResourceFile `json:"resourceFiles,omitempty"`
+ EnvironmentSettings *[]EnvironmentSetting `json:"environmentSettings,omitempty"`
+ // UserIdentity - If omitted, the task runs as a non-administrative user unique to the task.
+ UserIdentity *UserIdentity `json:"userIdentity,omitempty"`
+ // MaxTaskRetryCount - The Batch service retries a task if its exit code is nonzero. Note that this value specifically controls the number of retries. The Batch service will try the task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the task. If the maximum retry count is -1, the Batch service retries the task without limit.
+ MaxTaskRetryCount *int32 `json:"maxTaskRetryCount,omitempty"`
+ // WaitForSuccess - If true and the start task fails on a compute node, the Batch service retries the start task up to its maximum retry count (maxTaskRetryCount). If the task has still not completed successfully after all retries, then the Batch service marks the compute node unusable, and will not schedule tasks to it. This condition can be detected via the node state and failure info details. If false, the Batch service will not wait for the start task to complete. In this case, other tasks can start executing on the compute node while the start task is still running; and even if the start task fails, new tasks will continue to be scheduled on the node. The default is false.
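+ // Editorial usage sketch (not generated code): as the CommandLine documentation above notes, shell
+ // features require invoking a shell explicitly. A minimal StartTask might therefore look like the
+ // following; the command line and retry count are illustrative assumptions:
+ //
+ //   start := StartTask{
+ //       CommandLine:       to.StringPtr("/bin/sh -c 'echo start task ran'"),
+ //       MaxTaskRetryCount: to.Int32Ptr(1),
+ //       WaitForSuccess:    to.BoolPtr(true),
+ //   }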
+ WaitForSuccess *bool `json:"waitForSuccess,omitempty"` +} + +// StartTaskInformation ... +type StartTaskInformation struct { + // State - Possible values include: 'StartTaskStateRunning', 'StartTaskStateCompleted' + State StartTaskState `json:"state,omitempty"` + // StartTime - This value is reset every time the task is restarted or retried (that is, this is the most recent time at which the start task started running). + StartTime *date.Time `json:"startTime,omitempty"` + // EndTime - This is the end time of the most recent run of the start task, if that run has completed (even if that run failed and a retry is pending). This element is not present if the start task is currently running. + EndTime *date.Time `json:"endTime,omitempty"` + // ExitCode - This property is set only if the start task is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the start task (due to timeout, or user termination via the API) you may see an operating system-defined exit code. + ExitCode *int32 `json:"exitCode,omitempty"` + // ContainerInfo - This property is set only if the task runs in a container context. + ContainerInfo *TaskContainerExecutionInformation `json:"containerInfo,omitempty"` + // FailureInfo - This property is set only if the task is in the completed state and encountered a failure. + FailureInfo *TaskFailureInformation `json:"failureInfo,omitempty"` + // RetryCount - Task application failures (non-zero exit code) are retried, pre-processing errors (the task could not be run) and file upload errors are not retried. The Batch service will retry the task up to the limit specified by the constraints. + RetryCount *int32 `json:"retryCount,omitempty"` + // LastRetryTime - This element is present only if the task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the task has been restarted for reasons other than retry; for example, if the compute node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not. + LastRetryTime *date.Time `json:"lastRetryTime,omitempty"` + // Result - If the value is 'failed', then the details of the failure can be found in the failureInfo property. Possible values include: 'Success', 'Failure' + Result TaskExecutionResult `json:"result,omitempty"` +} + +// SubtaskInformation ... +type SubtaskInformation struct { + ID *int32 `json:"id,omitempty"` + NodeInfo *ComputeNodeInformation `json:"nodeInfo,omitempty"` + StartTime *date.Time `json:"startTime,omitempty"` + // EndTime - This property is set only if the subtask is in the Completed state. + EndTime *date.Time `json:"endTime,omitempty"` + // ExitCode - This property is set only if the subtask is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the subtask (due to timeout, or user termination via the API) you may see an operating system-defined exit code. 
+ ExitCode *int32 `json:"exitCode,omitempty"` + // ContainerInfo - This property is set only if the task runs in a container context. + ContainerInfo *TaskContainerExecutionInformation `json:"containerInfo,omitempty"` + // FailureInfo - This property is set only if the task is in the completed state and encountered a failure. + FailureInfo *TaskFailureInformation `json:"failureInfo,omitempty"` + // State - Possible values include: 'SubtaskStatePreparing', 'SubtaskStateRunning', 'SubtaskStateCompleted' + State SubtaskState `json:"state,omitempty"` + StateTransitionTime *date.Time `json:"stateTransitionTime,omitempty"` + // PreviousState - This property is not set if the subtask is in its initial running state. Possible values include: 'SubtaskStatePreparing', 'SubtaskStateRunning', 'SubtaskStateCompleted' + PreviousState SubtaskState `json:"previousState,omitempty"` + // PreviousStateTransitionTime - This property is not set if the subtask is in its initial running state. + PreviousStateTransitionTime *date.Time `json:"previousStateTransitionTime,omitempty"` + // Result - If the value is 'failed', then the details of the failure can be found in the failureInfo property. Possible values include: 'Success', 'Failure' + Result TaskExecutionResult `json:"result,omitempty"` +} + +// TaskAddCollectionParameter ... +type TaskAddCollectionParameter struct { + // Value - The total serialized size of this collection must be less than 1MB. If it is greater than 1MB (for example if each task has 100's of resource files or environment variables), the request will fail with code 'RequestBodyTooLarge' and should be retried again with fewer tasks. + Value *[]TaskAddParameter `json:"value,omitempty"` +} + +// TaskAddCollectionResult ... +type TaskAddCollectionResult struct { + autorest.Response `json:"-"` + Value *[]TaskAddResult `json:"value,omitempty"` +} + +// TaskAddParameter batch will retry tasks when a recovery operation is triggered on a compute node. +// Examples of recovery operations include (but are not limited to) when an unhealthy compute node is +// rebooted or a compute node disappeared due to host failure. Retries due to recovery operations are +// independent of and are not counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an +// internal retry due to a recovery operation may occur. Because of this, all tasks should be idempotent. +// This means tasks need to tolerate being interrupted and restarted without causing any corruption or +// duplicate data. The best practice for long running tasks is to use some form of checkpointing. +type TaskAddParameter struct { + // ID - The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within a job that differ only by case). + ID *string `json:"id,omitempty"` + // DisplayName - The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + DisplayName *string `json:"displayName,omitempty"` + // CommandLine - For multi-instance tasks, the command line is executed as the primary task, after the primary task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. 
If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + CommandLine *string `json:"commandLine,omitempty"` + // ContainerSettings - If the pool that will run this task has containerConfiguration set, this must be set as well. If the pool that will run this task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all task environment variables are mapped into the container, and the task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. + ContainerSettings *TaskContainerSettings `json:"containerSettings,omitempty"` + // ExitConditions - How the Batch service should respond when the task completes. + ExitConditions *ExitConditions `json:"exitConditions,omitempty"` + // ResourceFiles - For multi-instance tasks, the resource files will only be downloaded to the compute node on which the primary task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. + ResourceFiles *[]ResourceFile `json:"resourceFiles,omitempty"` + // OutputFiles - For multi-instance tasks, the files will only be uploaded from the compute node on which the primary task is executed. + OutputFiles *[]OutputFile `json:"outputFiles,omitempty"` + EnvironmentSettings *[]EnvironmentSetting `json:"environmentSettings,omitempty"` + AffinityInfo *AffinityInformation `json:"affinityInfo,omitempty"` + // Constraints - If you do not specify constraints, the maxTaskRetryCount is the maxTaskRetryCount specified for the job, the maxWallClockTime is infinite, and the retentionTime is 7 days. + Constraints *TaskConstraints `json:"constraints,omitempty"` + // UserIdentity - If omitted, the task runs as a non-administrative user unique to the task. + UserIdentity *UserIdentity `json:"userIdentity,omitempty"` + MultiInstanceSettings *MultiInstanceSettings `json:"multiInstanceSettings,omitempty"` + // DependsOn - This task will not be scheduled until all tasks that it depends on have completed successfully. If any of those tasks fail and exhaust their retry counts, this task will never be scheduled. If the job does not have usesTaskDependencies set to true, and this element is present, the request fails with error code TaskDependenciesNotSpecifiedOnJob. + DependsOn *TaskDependencies `json:"dependsOn,omitempty"` + // ApplicationPackageReferences - Application packages are downloaded and deployed to a shared directory, not the task working directory. Therefore, if a referenced package is already on the compute node, and is up to date, then it is not re-downloaded; the existing copy on the compute node is used. 
If a referenced application package cannot be installed, for example because the package has been deleted or because download failed, the task fails.
+ ApplicationPackageReferences *[]ApplicationPackageReference `json:"applicationPackageReferences,omitempty"`
+ // AuthenticationTokenSettings - If this property is set, the Batch service provides the task with an authentication token which can be used to authenticate Batch service operations without requiring an account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the task can carry out using the token depend on the settings. For example, a task can request job permissions in order to add other tasks to the job, or check the status of the job or of other tasks under the job.
+ AuthenticationTokenSettings *AuthenticationTokenSettings `json:"authenticationTokenSettings,omitempty"`
+}
+
+// TaskAddResult ...
+type TaskAddResult struct {
+ // Status - Possible values include: 'TaskAddStatusSuccess', 'TaskAddStatusClientError', 'TaskAddStatusServerError'
+ Status TaskAddStatus `json:"status,omitempty"`
+ TaskID *string `json:"taskId,omitempty"`
+ // ETag - You can use this to detect whether the task has changed between requests. In particular, you can pass the ETag with an Update Task request to specify that your changes should take effect only if nobody else has modified the job in the meantime.
+ ETag *string `json:"eTag,omitempty"`
+ LastModified *date.Time `json:"lastModified,omitempty"`
+ Location *string `json:"location,omitempty"`
+ Error *Error `json:"error,omitempty"`
+}
+
+// TaskConstraints ...
+type TaskConstraints struct {
+ // MaxWallClockTime - If this is not specified, there is no time limit on how long the task may run.
+ MaxWallClockTime *string `json:"maxWallClockTime,omitempty"`
+ // RetentionTime - The default is 7 days, i.e. the task directory will be retained for 7 days unless the compute node is removed or the job is deleted.
+ RetentionTime *string `json:"retentionTime,omitempty"`
+ // MaxTaskRetryCount - Note that this value specifically controls the number of retries for the task executable due to a nonzero exit code. The Batch service will try the task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the task after the first attempt. If the maximum retry count is -1, the Batch service retries the task without limit.
+ MaxTaskRetryCount *int32 `json:"maxTaskRetryCount,omitempty"`
+}
+
+// TaskContainerExecutionInformation ...
+type TaskContainerExecutionInformation struct {
+ ContainerID *string `json:"containerId,omitempty"`
+ // State - This is the state of the container according to the Docker service. It is equivalent to the status field returned by "docker inspect".
+ State *string `json:"state,omitempty"`
+ // Error - This is the detailed error string from the Docker service, if available. It is equivalent to the error field returned by "docker inspect".
+ Error *string `json:"error,omitempty"`
+}
+
+// TaskContainerSettings ...
+type TaskContainerSettings struct {
+ // ContainerRunOptions - These additional options are supplied as arguments to the "docker create" command, in addition to those controlled by the Batch Service.
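+ // Editorial usage sketch (not generated code): a minimal container configuration for a task, using
+ // only fields and constants defined on this type; the image name and run options are illustrative
+ // assumptions:
+ //
+ //   containerSettings := TaskContainerSettings{
+ //       ImageName:           to.StringPtr("ubuntu:18.04"),
+ //       ContainerRunOptions: to.StringPtr("--rm"),
+ //       WorkingDirectory:    TaskWorkingDirectory,
+ //   }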
+ ContainerRunOptions *string `json:"containerRunOptions,omitempty"` + // ImageName - This is the full image reference, as would be specified to "docker pull". If no tag is provided as part of the image name, the tag ":latest" is used as a default. + ImageName *string `json:"imageName,omitempty"` + // Registry - This setting can be omitted if it was already provided at pool creation. + Registry *ContainerRegistry `json:"registry,omitempty"` + // WorkingDirectory - The default is 'taskWorkingDirectory'. Possible values include: 'TaskWorkingDirectory', 'ContainerImageDefault' + WorkingDirectory ContainerWorkingDirectory `json:"workingDirectory,omitempty"` +} + +// TaskCounts ... +type TaskCounts struct { + autorest.Response `json:"-"` + Active *int32 `json:"active,omitempty"` + Running *int32 `json:"running,omitempty"` + Completed *int32 `json:"completed,omitempty"` + Succeeded *int32 `json:"succeeded,omitempty"` + Failed *int32 `json:"failed,omitempty"` +} + +// TaskDependencies ... +type TaskDependencies struct { + // TaskIds - The taskIds collection is limited to 64000 characters total (i.e. the combined length of all task IDs). If the taskIds collection exceeds the maximum length, the Add Task request fails with error code TaskDependencyListTooLong. In this case consider using task ID ranges instead. + TaskIds *[]string `json:"taskIds,omitempty"` + TaskIDRanges *[]TaskIDRange `json:"taskIdRanges,omitempty"` +} + +// TaskExecutionInformation ... +type TaskExecutionInformation struct { + // StartTime - 'Running' corresponds to the running state, so if the task specifies resource files or application packages, then the start time reflects the time at which the task started downloading or deploying these. If the task has been restarted or retried, this is the most recent time at which the task started running. This property is present only for tasks that are in the running or completed state. + StartTime *date.Time `json:"startTime,omitempty"` + // EndTime - This property is set only if the task is in the Completed state. + EndTime *date.Time `json:"endTime,omitempty"` + // ExitCode - This property is set only if the task is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the task (due to timeout, or user termination via the API) you may see an operating system-defined exit code. + ExitCode *int32 `json:"exitCode,omitempty"` + // ContainerInfo - This property is set only if the task runs in a container context. + ContainerInfo *TaskContainerExecutionInformation `json:"containerInfo,omitempty"` + // FailureInfo - This property is set only if the task is in the completed state and encountered a failure. + FailureInfo *TaskFailureInformation `json:"failureInfo,omitempty"` + // RetryCount - Task application failures (non-zero exit code) are retried; pre-processing errors (the task could not be run) and file upload errors are not retried. The Batch service will retry the task up to the limit specified by the constraints. + RetryCount *int32 `json:"retryCount,omitempty"` + // LastRetryTime - This element is present only if the task was retried (i.e. retryCount is nonzero).
If present, this is typically the same as startTime, but may be different if the task has been restarted for reasons other than retry; for example, if the compute node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not. + LastRetryTime *date.Time `json:"lastRetryTime,omitempty"` + // RequeueCount - When the user removes nodes from a pool (by resizing/shrinking the pool) or when the job is being disabled, the user can specify that running tasks on the nodes be requeued for execution. This count tracks how many times the task has been requeued for these reasons. + RequeueCount *int32 `json:"requeueCount,omitempty"` + // LastRequeueTime - This property is set only if the requeueCount is nonzero. + LastRequeueTime *date.Time `json:"lastRequeueTime,omitempty"` + // Result - If the value is 'failed', then the details of the failure can be found in the failureInfo property. Possible values include: 'Success', 'Failure' + Result TaskExecutionResult `json:"result,omitempty"` +} + +// TaskFailureInformation ... +type TaskFailureInformation struct { + // Category - Possible values include: 'UserError', 'ServerError' + Category ErrorCategory `json:"category,omitempty"` + Code *string `json:"code,omitempty"` + Message *string `json:"message,omitempty"` + Details *[]NameValuePair `json:"details,omitempty"` +} + +// TaskIDRange the start and end of the range are inclusive. For example, if a range has start 9 and end +// 12, then it represents tasks '9', '10', '11' and '12'. +type TaskIDRange struct { + Start *int32 `json:"start,omitempty"` + End *int32 `json:"end,omitempty"` +} + +// TaskInformation ... +type TaskInformation struct { + TaskURL *string `json:"taskUrl,omitempty"` + JobID *string `json:"jobId,omitempty"` + TaskID *string `json:"taskId,omitempty"` + SubtaskID *int32 `json:"subtaskId,omitempty"` + // TaskState - Possible values include: 'TaskStateActive', 'TaskStatePreparing', 'TaskStateRunning', 'TaskStateCompleted' + TaskState TaskState `json:"taskState,omitempty"` + ExecutionInfo *TaskExecutionInformation `json:"executionInfo,omitempty"` +} + +// TaskSchedulingPolicy ... +type TaskSchedulingPolicy struct { + // NodeFillType - If not specified, the default is spread. Possible values include: 'Spread', 'Pack' + NodeFillType ComputeNodeFillType `json:"nodeFillType,omitempty"` +} + +// TaskStatistics ... +type TaskStatistics struct { + URL *string `json:"url,omitempty"` + StartTime *date.Time `json:"startTime,omitempty"` + LastUpdateTime *date.Time `json:"lastUpdateTime,omitempty"` + UserCPUTime *string `json:"userCPUTime,omitempty"` + KernelCPUTime *string `json:"kernelCPUTime,omitempty"` + // WallClockTime - The wall clock time is the elapsed time from when the task started running on a compute node to when it finished (or to the last time the statistics were updated, if the task had not finished by then). If the task was retried, this includes the wall clock time of all the task retries. + WallClockTime *string `json:"wallClockTime,omitempty"` + ReadIOps *int64 `json:"readIOps,omitempty"` + WriteIOps *int64 `json:"writeIOps,omitempty"` + ReadIOGiB *float64 `json:"readIOGiB,omitempty"` + WriteIOGiB *float64 `json:"writeIOGiB,omitempty"` + WaitTime *string `json:"waitTime,omitempty"` +} + +// TaskUpdateParameter ... +type TaskUpdateParameter struct { + // Constraints - If omitted, the task is given the default constraints. For multi-instance tasks, updating the retention time applies only to the primary task and not subtasks. 
+ Constraints *TaskConstraints `json:"constraints,omitempty"` +} + +// UploadBatchServiceLogsConfiguration ... +type UploadBatchServiceLogsConfiguration struct { + // ContainerURL - The URL must include a Shared Access Signature (SAS) granting write permissions to the container. The SAS duration must allow enough time for the upload to finish. The SAS start time is optional; it is recommended that you do not specify one. + ContainerURL *string `json:"containerUrl,omitempty"` + // StartTime - Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. + StartTime *date.Time `json:"startTime,omitempty"` + // EndTime - Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. If omitted, the default is to upload all logs available after the startTime. + EndTime *date.Time `json:"endTime,omitempty"` +} + +// UploadBatchServiceLogsResult ... +type UploadBatchServiceLogsResult struct { + autorest.Response `json:"-"` + // VirtualDirectoryName - The virtual directory name is part of the blob name for each log file uploaded, and it is built based on the poolId, nodeId, and a unique identifier. + VirtualDirectoryName *string `json:"virtualDirectoryName,omitempty"` + NumberOfFilesUploaded *int32 `json:"numberOfFilesUploaded,omitempty"` +} + +// UsageStatistics ... +type UsageStatistics struct { + StartTime *date.Time `json:"startTime,omitempty"` + LastUpdateTime *date.Time `json:"lastUpdateTime,omitempty"` + DedicatedCoreTime *string `json:"dedicatedCoreTime,omitempty"` +} + +// UserAccount ... +type UserAccount struct { + Name *string `json:"name,omitempty"` + Password *string `json:"password,omitempty"` + // ElevationLevel - The default value is nonAdmin. Possible values include: 'NonAdmin', 'Admin' + ElevationLevel ElevationLevel `json:"elevationLevel,omitempty"` + // LinuxUserConfiguration - This property is ignored if specified on a Windows pool. If not specified, the user is created with the default options. + LinuxUserConfiguration *LinuxUserConfiguration `json:"linuxUserConfiguration,omitempty"` + // WindowsUserConfiguration - This property can only be specified if the user is on a Windows pool. If not specified and on a Windows pool, the user is created with the default options. + WindowsUserConfiguration *WindowsUserConfiguration `json:"windowsUserConfiguration,omitempty"` +} + +// UserIdentity specify either the userName or autoUser property, but not both. +type UserIdentity struct { + // UserName - The userName and autoUser properties are mutually exclusive; you must specify one but not both. + UserName *string `json:"username,omitempty"` + // AutoUser - The userName and autoUser properties are mutually exclusive; you must specify one but not both. + AutoUser *AutoUserSpecification `json:"autoUser,omitempty"` +} + +// VirtualMachineConfiguration ... +type VirtualMachineConfiguration struct { + ImageReference *ImageReference `json:"imageReference,omitempty"` + // NodeAgentSKUID - The Batch node agent is a program that runs on each node in the pool, and provides the command-and-control interface between the node and the Batch service.
There are different implementations of the node agent, known as SKUs, for different operating systems. You must specify a node agent SKU which matches the selected image reference. To get the list of supported node agent SKUs along with their list of verified image references, see the 'List supported node agent SKUs' operation. + NodeAgentSKUID *string `json:"nodeAgentSKUId,omitempty"` + // WindowsConfiguration - This property must not be specified if the imageReference property specifies a Linux OS image. + WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"` + // DataDisks - This property must be specified if the compute nodes in the pool need to have empty data disks attached to them. This cannot be updated. Each node gets its own disk (the disk is not a file share). Existing disks cannot be attached; each attached disk is empty. When the node is removed from the pool, the disk and all data associated with it are also deleted. The disk is not formatted after being attached; it must be formatted before use - for more information see https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. + DataDisks *[]DataDisk `json:"dataDisks,omitempty"` + // LicenseType - This only applies to images that contain the Windows operating system, and should only be used when you hold valid on-premises licenses for the nodes which will be deployed. If omitted, no on-premises licensing discount is applied. Values are: + // Windows_Server - The on-premises license is for Windows Server. + // Windows_Client - The on-premises license is for Windows Client. + LicenseType *string `json:"licenseType,omitempty"` + // ContainerConfiguration - If specified, setup is performed on each node in the pool to allow tasks to run in containers. All regular tasks and job manager tasks that run on this pool must specify the containerSettings property, and all other tasks may specify it. + ContainerConfiguration *ContainerConfiguration `json:"containerConfiguration,omitempty"` +} + +// WindowsConfiguration ... +type WindowsConfiguration struct { + // EnableAutomaticUpdates - If omitted, the default value is true. + EnableAutomaticUpdates *bool `json:"enableAutomaticUpdates,omitempty"` +} + +// WindowsUserConfiguration ... +type WindowsUserConfiguration struct { + // LoginMode - The default value for VirtualMachineConfiguration pools is 'batch' and for CloudServiceConfiguration pools is 'interactive'. Possible values include: 'Batch', 'Interactive' + LoginMode LoginMode `json:"loginMode,omitempty"` +} diff --git a/services/batch/2019-06-01.9.0/batch/pool.go b/services/batch/2019-06-01.9.0/batch/pool.go new file mode 100644 index 000000000000..3b85ba0ac4f6 --- /dev/null +++ b/services/batch/2019-06-01.9.0/batch/pool.go @@ -0,0 +1,2055 @@ +package batch + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "github.com/satori/go.uuid" + "net/http" +) + +// PoolClient is a client for issuing REST requests to the Azure Batch service. +type PoolClient struct { + BaseClient +} + +// NewPoolClient creates an instance of the PoolClient client. +func NewPoolClient(batchURL string) PoolClient { + return PoolClient{New(batchURL)} +} + +// Add when naming pools, avoid including sensitive information such as user names or secret project names. This +// information may appear in telemetry logs accessible to Microsoft Support engineers. +// Parameters: +// pool - the pool to be added. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +func (client PoolClient) Add(ctx context.Context, pool PoolAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.Add") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: pool, + Constraints: []validation.Constraint{{Target: "pool.ID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "pool.VMSize", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "pool.CloudServiceConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "pool.CloudServiceConfiguration.OsFamily", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "pool.VirtualMachineConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "pool.VirtualMachineConfiguration.ImageReference", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "pool.VirtualMachineConfiguration.NodeAgentSKUID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "pool.VirtualMachineConfiguration.ContainerConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "pool.VirtualMachineConfiguration.ContainerConfiguration.Type", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + {Target: "pool.NetworkConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "pool.NetworkConfiguration.EndpointConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "pool.NetworkConfiguration.EndpointConfiguration.InboundNATPools", Name: validation.Null, Rule: true, Chain: nil}}}, +
}}, + {Target: "pool.StartTask", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "pool.StartTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "pool.StartTask.ContainerSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "pool.StartTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "pool.StartTask.ContainerSettings.Registry", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "pool.StartTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "pool.StartTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}}}}); err != nil { + return result, validation.NewError("batch.PoolClient", "Add", err.Error()) + } + + req, err := client.AddPreparer(ctx, pool, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "Add", nil, "Failure preparing request") + return + } + + resp, err := client.AddSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.PoolClient", "Add", resp, "Failure sending request") + return + } + + result, err = client.AddResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "Add", resp, "Failure responding to request") + } + + return +} + +// AddPreparer prepares the Add request. +func (client PoolClient) AddPreparer(ctx context.Context, pool PoolAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPath("/pools"), + autorest.WithJSON(pool), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// AddSender sends the Add request. The method will close the +// http.Response Body if it receives an error. +func (client PoolClient) AddSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// AddResponder handles the response to the Add request. 
The method always +// closes the http.Response Body. +func (client PoolClient) AddResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete when you request that a pool be deleted, the following actions occur: the pool state is set to deleting; any +// ongoing resize operation on the pool is stopped; the Batch service starts resizing the pool to zero nodes; any +// tasks running on existing nodes are terminated and requeued (as if a resize pool operation had been requested with +// the default requeue option); finally, the pool is removed from the system. Because running tasks are requeued, the +// user can rerun these tasks by updating their job to target a different pool. The tasks can then run on the new pool. +// If you want to override the requeue behavior, then you should call resize pool explicitly to shrink the pool to zero +// size before deleting the pool. If you call an Update, Patch or Delete API on a pool in the deleting state, it will +// fail with HTTP status code 409 with error code PoolBeingDeleted. +// Parameters: +// poolID - the ID of the pool to delete. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time.
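For illustration, a minimal usage sketch of the Delete operation documented above (not part of the generated file): it assumes a PoolClient whose embedded autorest.Client already carries a valid Authorizer, and the pool ID and ETag values are placeholders obtained elsewhere. Passing nil for the optional timeout/header parameters and empty strings for the unused conditional headers leaves the service defaults in effect.

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
)

// deletePoolIfUnchanged issues a conditional pool delete: the pool is removed
// only if its current ETag still matches the etag captured by an earlier Get.
// Optional parameters (timeout, client-request-id, ocp-date, the remaining
// conditional headers) are left at their service defaults via nil / "".
func deletePoolIfUnchanged(ctx context.Context, client batch.PoolClient, poolID, etag string) error {
	resp, err := client.Delete(ctx, poolID, nil, nil, nil, nil, etag, "", nil, nil)
	if err != nil {
		return err
	}
	fmt.Println("delete accepted with HTTP status", resp.StatusCode)
	return nil
}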
+func (client PoolClient) Delete(ctx context.Context, poolID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, poolID, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.PoolClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client PoolClient) DeletePreparer(ctx context.Context, poolID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "poolId": autorest.Encode("path", poolID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/pools/{poolId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return 
preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client PoolClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client PoolClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// DisableAutoScale sends the disable auto scale request. +// Parameters: +// poolID - the ID of the pool on which to disable automatic scaling. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +func (client PoolClient) DisableAutoScale(ctx context.Context, poolID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.DisableAutoScale") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DisableAutoScalePreparer(ctx, poolID, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "DisableAutoScale", nil, "Failure preparing request") + return + } + + resp, err := client.DisableAutoScaleSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.PoolClient", "DisableAutoScale", resp, "Failure sending request") + return + } + + result, err = client.DisableAutoScaleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "DisableAutoScale", resp, "Failure responding to request") + } + + return +} + +// DisableAutoScalePreparer prepares the DisableAutoScale request. 
+func (client PoolClient) DisableAutoScalePreparer(ctx context.Context, poolID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "poolId": autorest.Encode("path", poolID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/pools/{poolId}/disableautoscale", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DisableAutoScaleSender sends the DisableAutoScale request. The method will close the +// http.Response Body if it receives an error. +func (client PoolClient) DisableAutoScaleSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// DisableAutoScaleResponder handles the response to the DisableAutoScale request. The method always +// closes the http.Response Body. +func (client PoolClient) DisableAutoScaleResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// EnableAutoScale you cannot enable automatic scaling on a pool if a resize operation is in progress on the pool. If +// automatic scaling of the pool is currently disabled, you must specify a valid autoscale formula as part of the +// request. If automatic scaling of the pool is already enabled, you may specify a new autoscale formula and/or a new +// evaluation interval. You cannot call this API for the same pool more than once every 30 seconds. +// Parameters: +// poolID - the ID of the pool on which to enable automatic scaling. +// poolEnableAutoScaleParameter - the parameters for the request. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. 
Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. +func (client PoolClient) EnableAutoScale(ctx context.Context, poolID string, poolEnableAutoScaleParameter PoolEnableAutoScaleParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.EnableAutoScale") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.EnableAutoScalePreparer(ctx, poolID, poolEnableAutoScaleParameter, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "EnableAutoScale", nil, "Failure preparing request") + return + } + + resp, err := client.EnableAutoScaleSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.PoolClient", "EnableAutoScale", resp, "Failure sending request") + return + } + + result, err = client.EnableAutoScaleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "EnableAutoScale", resp, "Failure responding to request") + } + + return +} + +// EnableAutoScalePreparer prepares the EnableAutoScale request. 
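As a rough illustration of the EnableAutoScale call shown above (a sketch, not generated code): it assumes the PoolEnableAutoScaleParameter model defined elsewhere in this package exposes an AutoScaleFormula field, and that the client is already authorized. The formula is a deliberately trivial constant; real formulas use the Batch autoscale formula language.

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
)

// enableFixedAutoScale turns on automatic scaling for poolID with a trivial
// formula that pins the pool at two dedicated nodes. The service rejects the
// call if a resize is in progress or if it is repeated within 30 seconds.
func enableFixedAutoScale(ctx context.Context, client batch.PoolClient, poolID string) error {
	formula := "$TargetDedicatedNodes = 2;"
	params := batch.PoolEnableAutoScaleParameter{
		AutoScaleFormula: &formula, // assumed field name from this package's models
	}
	_, err := client.EnableAutoScale(ctx, poolID, params, nil, nil, nil, nil, "", "", nil, nil)
	return err
}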
+func (client PoolClient) EnableAutoScalePreparer(ctx context.Context, poolID string, poolEnableAutoScaleParameter PoolEnableAutoScaleParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "poolId": autorest.Encode("path", poolID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/pools/{poolId}/enableautoscale", pathParameters), + autorest.WithJSON(poolEnableAutoScaleParameter), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// EnableAutoScaleSender sends the EnableAutoScale request. The method will close the +// http.Response Body if it receives an error. +func (client PoolClient) EnableAutoScaleSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// EnableAutoScaleResponder handles the response to the EnableAutoScale request. The method always +// closes the http.Response Body. +func (client PoolClient) EnableAutoScaleResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// EvaluateAutoScale this API is primarily for validating an autoscale formula, as it simply returns the result without +// applying the formula to the pool. The pool must have auto scaling enabled in order to evaluate a formula. 
+// Parameters: +// poolID - the ID of the pool on which to evaluate the automatic scaling formula. +// poolEvaluateAutoScaleParameter - the parameters for the request. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +func (client PoolClient) EvaluateAutoScale(ctx context.Context, poolID string, poolEvaluateAutoScaleParameter PoolEvaluateAutoScaleParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result AutoScaleRun, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.EvaluateAutoScale") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: poolEvaluateAutoScaleParameter, + Constraints: []validation.Constraint{{Target: "poolEvaluateAutoScaleParameter.AutoScaleFormula", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("batch.PoolClient", "EvaluateAutoScale", err.Error()) + } + + req, err := client.EvaluateAutoScalePreparer(ctx, poolID, poolEvaluateAutoScaleParameter, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "EvaluateAutoScale", nil, "Failure preparing request") + return + } + + resp, err := client.EvaluateAutoScaleSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.PoolClient", "EvaluateAutoScale", resp, "Failure sending request") + return + } + + result, err = client.EvaluateAutoScaleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "EvaluateAutoScale", resp, "Failure responding to request") + } + + return +} + +// EvaluateAutoScalePreparer prepares the EvaluateAutoScale request. 
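A companion sketch for EvaluateAutoScale, which validates a formula without applying it to the pool. The AutoScaleFormula field name follows the validation constraint in the method above; the AutoScaleRun result is printed whole rather than assuming individual field names.

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
)

// evaluateFormula asks the service to evaluate formula against poolID
// (autoscale must already be enabled on the pool) and prints the outcome.
func evaluateFormula(ctx context.Context, client batch.PoolClient, poolID, formula string) error {
	params := batch.PoolEvaluateAutoScaleParameter{AutoScaleFormula: &formula}
	run, err := client.EvaluateAutoScale(ctx, poolID, params, nil, nil, nil, nil)
	if err != nil {
		return err
	}
	fmt.Printf("autoscale evaluation: %+v\n", run)
	return nil
}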
+func (client PoolClient) EvaluateAutoScalePreparer(ctx context.Context, poolID string, poolEvaluateAutoScaleParameter PoolEvaluateAutoScaleParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "poolId": autorest.Encode("path", poolID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/pools/{poolId}/evaluateautoscale", pathParameters), + autorest.WithJSON(poolEvaluateAutoScaleParameter), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// EvaluateAutoScaleSender sends the EvaluateAutoScale request. The method will close the +// http.Response Body if it receives an error. +func (client PoolClient) EvaluateAutoScaleSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// EvaluateAutoScaleResponder handles the response to the EvaluateAutoScale request. The method always +// closes the http.Response Body. +func (client PoolClient) EvaluateAutoScaleResponder(resp *http.Response) (result AutoScaleRun, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Exists gets basic properties of a pool. +// Parameters: +// poolID - the ID of the pool to get. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. 
The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. +func (client PoolClient) Exists(ctx context.Context, poolID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.Exists") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ExistsPreparer(ctx, poolID, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "Exists", nil, "Failure preparing request") + return + } + + resp, err := client.ExistsSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.PoolClient", "Exists", resp, "Failure sending request") + return + } + + result, err = client.ExistsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "Exists", resp, "Failure responding to request") + } + + return +} + +// ExistsPreparer prepares the Exists request. 
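A small sketch of how the Exists responder above is typically interpreted: because the responder accepts both 200 and 404 as non-error statuses, the caller maps 200 to "pool found" and 404 to "pool not found".

package example

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
)

// poolExists reports whether poolID exists, mapping the 200/404 statuses that
// ExistsResponder accepts onto a boolean.
func poolExists(ctx context.Context, client batch.PoolClient, poolID string) (bool, error) {
	resp, err := client.Exists(ctx, poolID, nil, nil, nil, nil, "", "", nil, nil)
	if err != nil {
		return false, err
	}
	return resp.StatusCode == http.StatusOK, nil
}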
+func (client PoolClient) ExistsPreparer(ctx context.Context, poolID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "poolId": autorest.Encode("path", poolID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsHead(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/pools/{poolId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ExistsSender sends the Exists request. The method will close the +// http.Response Body if it receives an error. +func (client PoolClient) ExistsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ExistsResponder handles the response to the Exists request. The method always +// closes the http.Response Body. +func (client PoolClient) ExistsResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets information about the specified pool. +// Parameters: +// poolID - the ID of the pool to get. +// selectParameter - an OData $select clause. +// expand - an OData $expand clause. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. +func (client PoolClient) Get(ctx context.Context, poolID string, selectParameter string, expand string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result CloudPool, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, poolID, selectParameter, expand, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.PoolClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
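For the Get operation above, a sketch showing the OData $select and $expand parameters. The property names in the $select clause and the 'stats' expand value are assumed REST-level pool properties for this API version; all optional headers are left at their defaults.

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
)

// describePool fetches a trimmed view of a pool, selecting a few properties
// and expanding its statistics.
func describePool(ctx context.Context, client batch.PoolClient, poolID string) error {
	pool, err := client.Get(ctx, poolID, "id,state,currentDedicatedNodes", "stats", nil, nil, nil, nil, "", "", nil, nil)
	if err != nil {
		return err
	}
	fmt.Printf("pool %s: %+v\n", poolID, pool)
	return nil
}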
+func (client PoolClient) GetPreparer(ctx context.Context, poolID string, selectParameter string, expand string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "poolId": autorest.Encode("path", poolID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/pools/{poolId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client PoolClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client PoolClient) GetResponder(resp *http.Response) (result CloudPool, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetAllLifetimeStatistics statistics are aggregated across all pools that have ever existed in the account, from +// account creation to the last update time of the statistics. 
The statistics may not be immediately available. The +// Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. +// Parameters: +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +func (client PoolClient) GetAllLifetimeStatistics(ctx context.Context, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result PoolStatistics, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.GetAllLifetimeStatistics") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetAllLifetimeStatisticsPreparer(ctx, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "GetAllLifetimeStatistics", nil, "Failure preparing request") + return + } + + resp, err := client.GetAllLifetimeStatisticsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.PoolClient", "GetAllLifetimeStatistics", resp, "Failure sending request") + return + } + + result, err = client.GetAllLifetimeStatisticsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "GetAllLifetimeStatistics", resp, "Failure responding to request") + } + + return +} + +// GetAllLifetimeStatisticsPreparer prepares the GetAllLifetimeStatistics request. 
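A similarly minimal sketch for GetAllLifetimeStatistics, in the same illustrative package and with the same client setup as the earlier sketch; all optional parameters are left nil so the service defaults apply.

package batchexamples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
)

// poolLifetimeStats fetches the account-wide pool statistics described above.
// Timeout, client-request-id, return-client-request-id and ocp-date are all
// nil; note that the roll-up behind this endpoint can lag by about 30 minutes.
func poolLifetimeStats(ctx context.Context, client batch.PoolClient) (batch.PoolStatistics, error) {
	return client.GetAllLifetimeStatistics(ctx, nil, nil, nil, nil)
}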
+func (client PoolClient) GetAllLifetimeStatisticsPreparer(ctx context.Context, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPath("/lifetimepoolstats"), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetAllLifetimeStatisticsSender sends the GetAllLifetimeStatistics request. The method will close the +// http.Response Body if it receives an error. +func (client PoolClient) GetAllLifetimeStatisticsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetAllLifetimeStatisticsResponder handles the response to the GetAllLifetimeStatistics request. The method always +// closes the http.Response Body. +func (client PoolClient) GetAllLifetimeStatisticsResponder(resp *http.Response) (result PoolStatistics, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List sends the list request. +// Parameters: +// filter - an OData $filter clause. For more information on constructing this filter, see +// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. +// selectParameter - an OData $select clause. +// expand - an OData $expand clause. +// maxResults - the maximum number of items to return in the response. A maximum of 1000 pools can be returned. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. 
+func (client PoolClient) List(ctx context.Context, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CloudPoolListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.List") + defer func() { + sc := -1 + if result.cplr.Response.Response != nil { + sc = result.cplr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxResults, + Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil}, + {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("batch.PoolClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, filter, selectParameter, expand, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.cplr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.PoolClient", "List", resp, "Failure sending request") + return + } + + result.cplr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
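For pagination, a hedged sketch of walking every pool via ListComplete (defined later in this file). The NotDone, Value and NextWithContext methods are assumed from the generated CloudPoolListResultIterator, and the "$select=id" value assumes the JSON field name used by the CloudPool model.

package batchexamples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
)

// listPoolIDs collects the IDs of every pool in the account, crossing page
// boundaries automatically. The filter, expand, maxresults and header
// parameters are left at their zero values.
func listPoolIDs(ctx context.Context, client batch.PoolClient) ([]string, error) {
	it, err := client.ListComplete(ctx, "", "id", "", nil, nil, nil, nil, nil)
	if err != nil {
		return nil, err
	}
	var ids []string
	for it.NotDone() {
		if p := it.Value(); p.ID != nil {
			ids = append(ids, *p.ID)
		}
		if err := it.NextWithContext(ctx); err != nil {
			return nil, err
		}
	}
	return ids, nil
}

The page-based List call shown above can be used the same way when a caller wants to stop early instead of enumerating everything.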
+func (client PoolClient) ListPreparer(ctx context.Context, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + if maxResults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxResults) + } else { + queryParameters["maxresults"] = autorest.Encode("query", 1000) + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPath("/pools"), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client PoolClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client PoolClient) ListResponder(resp *http.Response) (result CloudPoolListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client PoolClient) listNextResults(ctx context.Context, lastResults CloudPoolListResult) (result CloudPoolListResult, err error) { + req, err := lastResults.cloudPoolListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.PoolClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "batch.PoolClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client PoolClient) ListComplete(ctx context.Context, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CloudPoolListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, filter, selectParameter, expand, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + return +} + +// ListUsageMetrics if you do not specify a $filter clause including a poolId, the response includes all pools that +// existed in the account in the time range of the returned aggregation intervals. If you do not specify a $filter +// clause including a startTime or endTime these filters default to the start and end times of the last aggregation +// interval currently available; that is, only the last aggregation interval is returned. +// Parameters: +// startTime - the earliest time from which to include metrics. This must be at least two and a half hours +// before the current time. If not specified this defaults to the start time of the last aggregation interval +// currently available. +// endTime - the latest time from which to include metrics. This must be at least two hours before the current +// time. If not specified this defaults to the end time of the last aggregation interval currently available. +// filter - an OData $filter clause. For more information on constructing this filter, see +// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. +// maxResults - the maximum number of items to return in the response. A maximum of 1000 results will be +// returned. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. 
+func (client PoolClient) ListUsageMetrics(ctx context.Context, startTime *date.Time, endTime *date.Time, filter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result PoolListUsageMetricsResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.ListUsageMetrics") + defer func() { + sc := -1 + if result.plumr.Response.Response != nil { + sc = result.plumr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxResults, + Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil}, + {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("batch.PoolClient", "ListUsageMetrics", err.Error()) + } + + result.fn = client.listUsageMetricsNextResults + req, err := client.ListUsageMetricsPreparer(ctx, startTime, endTime, filter, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "ListUsageMetrics", nil, "Failure preparing request") + return + } + + resp, err := client.ListUsageMetricsSender(req) + if err != nil { + result.plumr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.PoolClient", "ListUsageMetrics", resp, "Failure sending request") + return + } + + result.plumr, err = client.ListUsageMetricsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "ListUsageMetrics", resp, "Failure responding to request") + } + + return +} + +// ListUsageMetricsPreparer prepares the ListUsageMetrics request. 
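A sketch of calling ListUsageMetrics with an explicit time window, using the go-autorest date.Time wrapper around time.Time. The two/two-and-a-half hour constraints come from the comment above; the returned page can be walked like the pool list, or ListUsageMetricsComplete (later in this file) can be used instead.

package batchexamples

import (
	"context"
	"time"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
	"github.com/Azure/go-autorest/autorest/date"
)

// lastDayUsage requests roughly the last 24 hours of pool usage metrics.
// endTime is pushed three hours into the past to satisfy the "at least two
// hours before the current time" rule, and startTime a day before that.
func lastDayUsage(ctx context.Context, client batch.PoolClient) (batch.PoolListUsageMetricsResultPage, error) {
	end := date.Time{Time: time.Now().Add(-3 * time.Hour)}
	start := date.Time{Time: end.Add(-24 * time.Hour)}
	return client.ListUsageMetrics(ctx, &start, &end, "", nil, nil, nil, nil, nil)
}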
+func (client PoolClient) ListUsageMetricsPreparer(ctx context.Context, startTime *date.Time, endTime *date.Time, filter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if startTime != nil { + queryParameters["starttime"] = autorest.Encode("query", *startTime) + } + if endTime != nil { + queryParameters["endtime"] = autorest.Encode("query", *endTime) + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if maxResults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxResults) + } else { + queryParameters["maxresults"] = autorest.Encode("query", 1000) + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPath("/poolusagemetrics"), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListUsageMetricsSender sends the ListUsageMetrics request. The method will close the +// http.Response Body if it receives an error. +func (client PoolClient) ListUsageMetricsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ListUsageMetricsResponder handles the response to the ListUsageMetrics request. The method always +// closes the http.Response Body. +func (client PoolClient) ListUsageMetricsResponder(resp *http.Response) (result PoolListUsageMetricsResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listUsageMetricsNextResults retrieves the next set of results, if any. 
+func (client PoolClient) listUsageMetricsNextResults(ctx context.Context, lastResults PoolListUsageMetricsResult) (result PoolListUsageMetricsResult, err error) { + req, err := lastResults.poolListUsageMetricsResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.PoolClient", "listUsageMetricsNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListUsageMetricsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "batch.PoolClient", "listUsageMetricsNextResults", resp, "Failure sending next results request") + } + result, err = client.ListUsageMetricsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "listUsageMetricsNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListUsageMetricsComplete enumerates all values, automatically crossing page boundaries as required. +func (client PoolClient) ListUsageMetricsComplete(ctx context.Context, startTime *date.Time, endTime *date.Time, filter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result PoolListUsageMetricsResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.ListUsageMetrics") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListUsageMetrics(ctx, startTime, endTime, filter, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + return +} + +// Patch this only replaces the pool properties specified in the request. For example, if the pool has a start task +// associated with it, and a request does not specify a start task element, then the pool keeps the existing start +// task. +// Parameters: +// poolID - the ID of the pool to update. +// poolPatchParameter - the parameters for the request. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. 
The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. +func (client PoolClient) Patch(ctx context.Context, poolID string, poolPatchParameter PoolPatchParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.Patch") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.PatchPreparer(ctx, poolID, poolPatchParameter, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "Patch", nil, "Failure preparing request") + return + } + + resp, err := client.PatchSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.PoolClient", "Patch", resp, "Failure sending request") + return + } + + result, err = client.PatchResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "Patch", resp, "Failure responding to request") + } + + return +} + +// PatchPreparer prepares the Patch request. +func (client PoolClient) PatchPreparer(ctx context.Context, poolID string, poolPatchParameter PoolPatchParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "poolId": autorest.Encode("path", poolID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"), + autorest.AsPatch(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/pools/{poolId}", pathParameters), + autorest.WithJSON(poolPatchParameter), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + if ifModifiedSince != nil { + 
preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// PatchSender sends the Patch request. The method will close the +// http.Response Body if it receives an error. +func (client PoolClient) PatchSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// PatchResponder handles the response to the Patch request. The method always +// closes the http.Response Body. +func (client PoolClient) PatchResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// RemoveNodes this operation can only run when the allocation state of the pool is steady. When this operation runs, +// the allocation state changes from steady to resizing. +// Parameters: +// poolID - the ID of the pool from which you want to remove nodes. +// nodeRemoveParameter - the parameters for the request. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. 
+func (client PoolClient) RemoveNodes(ctx context.Context, poolID string, nodeRemoveParameter NodeRemoveParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.RemoveNodes") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: nodeRemoveParameter, + Constraints: []validation.Constraint{{Target: "nodeRemoveParameter.NodeList", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "nodeRemoveParameter.NodeList", Name: validation.MaxItems, Rule: 100, Chain: nil}}}}}}); err != nil { + return result, validation.NewError("batch.PoolClient", "RemoveNodes", err.Error()) + } + + req, err := client.RemoveNodesPreparer(ctx, poolID, nodeRemoveParameter, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "RemoveNodes", nil, "Failure preparing request") + return + } + + resp, err := client.RemoveNodesSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.PoolClient", "RemoveNodes", resp, "Failure sending request") + return + } + + result, err = client.RemoveNodesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "RemoveNodes", resp, "Failure responding to request") + } + + return +} + +// RemoveNodesPreparer prepares the RemoveNodes request. 
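A sketch of RemoveNodes under the assumption that NodeRemoveParameter.NodeList is a *[]string of compute node IDs (the validation above only shows that the field is required and capped at 100 entries).

package batchexamples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
)

// removeNodes asks the service to remove specific compute nodes from a pool.
// At most 100 node IDs may be sent per request, and the pool must be in the
// steady allocation state when the call is made.
func removeNodes(ctx context.Context, client batch.PoolClient, poolID string, nodeIDs []string) error {
	param := batch.NodeRemoveParameter{NodeList: &nodeIDs}
	_, err := client.RemoveNodes(ctx, poolID, param, nil, nil, nil, nil, "", "", nil, nil)
	return err
}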
+func (client PoolClient) RemoveNodesPreparer(ctx context.Context, poolID string, nodeRemoveParameter NodeRemoveParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "poolId": autorest.Encode("path", poolID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/pools/{poolId}/removenodes", pathParameters), + autorest.WithJSON(nodeRemoveParameter), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RemoveNodesSender sends the RemoveNodes request. The method will close the +// http.Response Body if it receives an error. +func (client PoolClient) RemoveNodesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// RemoveNodesResponder handles the response to the RemoveNodes request. The method always +// closes the http.Response Body. +func (client PoolClient) RemoveNodesResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Resize you can only resize a pool when its allocation state is steady. If the pool is already resizing, the request +// fails with status code 409. When you resize a pool, the pool's allocation state changes from steady to resizing. 
You +// cannot resize pools which are configured for automatic scaling. If you try to do this, the Batch service returns an +// error 409. If you resize a pool downwards, the Batch service chooses which nodes to remove. To remove specific +// nodes, use the pool remove nodes API instead. +// Parameters: +// poolID - the ID of the pool to resize. +// poolResizeParameter - the parameters for the request. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. +func (client PoolClient) Resize(ctx context.Context, poolID string, poolResizeParameter PoolResizeParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.Resize") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ResizePreparer(ctx, poolID, poolResizeParameter, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "Resize", nil, "Failure preparing request") + return + } + + resp, err := client.ResizeSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.PoolClient", "Resize", resp, "Failure sending request") + return + } + + result, err = client.ResizeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "Resize", resp, "Failure responding to request") + } + + return +} + +// ResizePreparer prepares the Resize request. 
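A sketch of a downward or upward resize. The TargetDedicatedNodes field name is assumed from the 2019-06-01 REST API shape of PoolResizeParameter and should be checked against models.go in this package; to.Int32Ptr comes from the go-autorest helper package.

package batchexamples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
	"github.com/Azure/go-autorest/autorest/to"
)

// resizeTo requests a new dedicated-node count for a pool that is in the
// steady state and not configured for autoscale (otherwise the service
// answers 409, as the comment above notes).
func resizeTo(ctx context.Context, client batch.PoolClient, poolID string, dedicated int32) error {
	param := batch.PoolResizeParameter{TargetDedicatedNodes: to.Int32Ptr(dedicated)}
	_, err := client.Resize(ctx, poolID, param, nil, nil, nil, nil, "", "", nil, nil)
	return err
}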
+func (client PoolClient) ResizePreparer(ctx context.Context, poolID string, poolResizeParameter PoolResizeParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "poolId": autorest.Encode("path", poolID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/pools/{poolId}/resize", pathParameters), + autorest.WithJSON(poolResizeParameter), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ResizeSender sends the Resize request. The method will close the +// http.Response Body if it receives an error. +func (client PoolClient) ResizeSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ResizeResponder handles the response to the Resize request. The method always +// closes the http.Response Body. +func (client PoolClient) ResizeResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// StopResize this does not restore the pool to its previous state before the resize operation: it only stops any +// further changes being made, and the pool maintains its current state. After stopping, the pool stabilizes at the +// number of nodes it was at when the stop operation was done. 
During the stop operation, the pool allocation state +// changes first to stopping and then to steady. A resize operation need not be an explicit resize pool request; this +// API can also be used to halt the initial sizing of the pool when it is created. +// Parameters: +// poolID - the ID of the pool whose resizing you want to stop. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. +func (client PoolClient) StopResize(ctx context.Context, poolID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.StopResize") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.StopResizePreparer(ctx, poolID, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "StopResize", nil, "Failure preparing request") + return + } + + resp, err := client.StopResizeSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.PoolClient", "StopResize", resp, "Failure sending request") + return + } + + result, err = client.StopResizeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "StopResize", resp, "Failure responding to request") + } + + return +} + +// StopResizePreparer prepares the StopResize request. 
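A short sketch of StopResize that also illustrates the conditional ETag headers documented above; everything else is passed as a zero value.

package batchexamples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
)

// stopResize halts an in-progress resize (or the initial sizing of a newly
// created pool). Passing the pool's last-seen ETag as ifMatch makes the call
// conditional on the pool being unchanged since it was read; pass "" to skip
// that check.
func stopResize(ctx context.Context, client batch.PoolClient, poolID, etag string) error {
	_, err := client.StopResize(ctx, poolID, nil, nil, nil, nil, etag, "", nil, nil)
	return err
}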
+func (client PoolClient) StopResizePreparer(ctx context.Context, poolID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "poolId": autorest.Encode("path", poolID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/pools/{poolId}/stopresize", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// StopResizeSender sends the StopResize request. The method will close the +// http.Response Body if it receives an error. +func (client PoolClient) StopResizeSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// StopResizeResponder handles the response to the StopResize request. The method always +// closes the http.Response Body. +func (client PoolClient) StopResizeResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// UpdateProperties this fully replaces all the updatable properties of the pool. For example, if the pool has a start +// task associated with it and if start task is not specified with this request, then the Batch service will remove the +// existing start task. +// Parameters: +// poolID - the ID of the pool to update. +// poolUpdatePropertiesParameter - the parameters for the request. 
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +func (client PoolClient) UpdateProperties(ctx context.Context, poolID string, poolUpdatePropertiesParameter PoolUpdatePropertiesParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.UpdateProperties") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: poolUpdatePropertiesParameter, + Constraints: []validation.Constraint{{Target: "poolUpdatePropertiesParameter.StartTask", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "poolUpdatePropertiesParameter.StartTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "poolUpdatePropertiesParameter.StartTask.ContainerSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "poolUpdatePropertiesParameter.StartTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "poolUpdatePropertiesParameter.StartTask.ContainerSettings.Registry", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "poolUpdatePropertiesParameter.StartTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "poolUpdatePropertiesParameter.StartTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}, + {Target: "poolUpdatePropertiesParameter.CertificateReferences", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "poolUpdatePropertiesParameter.ApplicationPackageReferences", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "poolUpdatePropertiesParameter.Metadata", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("batch.PoolClient", "UpdateProperties", err.Error()) + } + + req, err := client.UpdatePropertiesPreparer(ctx, poolID, poolUpdatePropertiesParameter, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "UpdateProperties", nil, "Failure preparing request") + return + } + + resp, err := client.UpdatePropertiesSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.PoolClient", "UpdateProperties", resp, "Failure sending request") + return + } + + result, err = client.UpdatePropertiesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.PoolClient", "UpdateProperties", resp, "Failure responding to request") + } + + return +} + +// UpdatePropertiesPreparer prepares the UpdateProperties request. 
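To make the Patch versus UpdateProperties distinction concrete, a hedged sketch of the full-replace behaviour. The element type names (CertificateReference, ApplicationPackageReference, MetadataItem) and pointer-to-slice field types are assumed from this package's models; the required-field structure matches the validation constraints above.

package batchexamples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
)

// clearPoolProperties illustrates UpdateProperties' full-replace semantics:
// the three list fields are mandatory, and sending them empty removes any
// existing certificate references, application package references and
// metadata. Use Patch (earlier in this file) instead when properties that are
// not specified should be left untouched.
func clearPoolProperties(ctx context.Context, client batch.PoolClient, poolID string) error {
	params := batch.PoolUpdatePropertiesParameter{
		CertificateReferences:        &[]batch.CertificateReference{},
		ApplicationPackageReferences: &[]batch.ApplicationPackageReference{},
		Metadata:                     &[]batch.MetadataItem{},
	}
	_, err := client.UpdateProperties(ctx, poolID, params, nil, nil, nil, nil)
	return err
}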
+func (client PoolClient) UpdatePropertiesPreparer(ctx context.Context, poolID string, poolUpdatePropertiesParameter PoolUpdatePropertiesParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "poolId": autorest.Encode("path", poolID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/pools/{poolId}/updateproperties", pathParameters), + autorest.WithJSON(poolUpdatePropertiesParameter), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdatePropertiesSender sends the UpdateProperties request. The method will close the +// http.Response Body if it receives an error. +func (client PoolClient) UpdatePropertiesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// UpdatePropertiesResponder handles the response to the UpdateProperties request. The method always +// closes the http.Response Body. +func (client PoolClient) UpdatePropertiesResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/services/batch/2019-06-01.9.0/batch/task.go b/services/batch/2019-06-01.9.0/batch/task.go new file mode 100644 index 000000000000..ebccfc5beb07 --- /dev/null +++ b/services/batch/2019-06-01.9.0/batch/task.go @@ -0,0 +1,1258 @@ +package batch + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "github.com/satori/go.uuid"
+ "net/http"
+)
+
+// TaskClient is a client for issuing REST requests to the Azure Batch service.
+type TaskClient struct {
+ BaseClient
+}
+
+// NewTaskClient creates an instance of the TaskClient client.
+func NewTaskClient(batchURL string) TaskClient {
+ return TaskClient{New(batchURL)}
+}
+
+// Add the maximum lifetime of a task from addition to completion is 180 days. If a task has not completed within 180
+// days of being added it will be terminated by the Batch service and left in whatever state it was in at that time.
+// Parameters:
+// jobID - the ID of the job to which the task is to be added.
+// task - the task to be added.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client TaskClient) Add(ctx context.Context, jobID string, task TaskAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TaskClient.Add")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: task,
+ Constraints: []validation.Constraint{{Target: "task.ID", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "task.CommandLine", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "task.ContainerSettings", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "task.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "task.ContainerSettings.Registry", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "task.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "task.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }},
+ {Target: "task.AffinityInfo", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "task.AffinityInfo.AffinityID", Name: validation.Null, Rule: true, Chain: nil}}},
+ {Target: "task.MultiInstanceSettings", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "task.MultiInstanceSettings.CoordinationCommandLine", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
+ return result, validation.NewError("batch.TaskClient", "Add", err.Error())
+ }
+
+ req, err := client.AddPreparer(ctx, jobID, task, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err
= autorest.NewErrorWithError(err, "batch.TaskClient", "Add", nil, "Failure preparing request") + return + } + + resp, err := client.AddSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.TaskClient", "Add", resp, "Failure sending request") + return + } + + result, err = client.AddResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.TaskClient", "Add", resp, "Failure responding to request") + } + + return +} + +// AddPreparer prepares the Add request. +func (client TaskClient) AddPreparer(ctx context.Context, jobID string, task TaskAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobId": autorest.Encode("path", jobID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobs/{jobId}/tasks", pathParameters), + autorest.WithJSON(task), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// AddSender sends the Add request. The method will close the +// http.Response Body if it receives an error. +func (client TaskClient) AddSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// AddResponder handles the response to the Add request. The method always +// closes the http.Response Body. +func (client TaskClient) AddResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// AddCollection note that each task must have a unique ID. The Batch service may not return the results for each task +// in the same order the tasks were submitted in this request. If the server times out or the connection is closed +// during the request, the request may have been partially or fully processed, or not at all. In such cases, the user +// should re-issue the request. Note that it is up to the user to correctly handle failures when re-issuing a request. 
+// For example, you should use the same task IDs during a retry so that if the prior operation succeeded, the retry +// will not create extra tasks unexpectedly. If the response contains any tasks which failed to add, a client can retry +// the request. In a retry, it is most efficient to resubmit only tasks that failed to add, and to omit tasks that were +// successfully added on the first attempt. The maximum lifetime of a task from addition to completion is 180 days. If +// a task has not completed within 180 days of being added it will be terminated by the Batch service and left in +// whatever state it was in at that time. +// Parameters: +// jobID - the ID of the job to which the task collection is to be added. +// taskCollection - the tasks to be added. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +func (client TaskClient) AddCollection(ctx context.Context, jobID string, taskCollection TaskAddCollectionParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result TaskAddCollectionResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TaskClient.AddCollection") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: taskCollection, + Constraints: []validation.Constraint{{Target: "taskCollection.Value", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "taskCollection.Value", Name: validation.MaxItems, Rule: 100, Chain: nil}}}}}}); err != nil { + return result, validation.NewError("batch.TaskClient", "AddCollection", err.Error()) + } + + req, err := client.AddCollectionPreparer(ctx, jobID, taskCollection, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.TaskClient", "AddCollection", nil, "Failure preparing request") + return + } + + resp, err := client.AddCollectionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.TaskClient", "AddCollection", resp, "Failure sending request") + return + } + + result, err = client.AddCollectionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.TaskClient", "AddCollection", resp, "Failure responding to request") + } + + return +} + +// AddCollectionPreparer prepares the AddCollection request. 
+func (client TaskClient) AddCollectionPreparer(ctx context.Context, jobID string, taskCollection TaskAddCollectionParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobId": autorest.Encode("path", jobID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobs/{jobId}/addtaskcollection", pathParameters), + autorest.WithJSON(taskCollection), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// AddCollectionSender sends the AddCollection request. The method will close the +// http.Response Body if it receives an error. +func (client TaskClient) AddCollectionSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// AddCollectionResponder handles the response to the AddCollection request. The method always +// closes the http.Response Body. +func (client TaskClient) AddCollectionResponder(resp *http.Response) (result TaskAddCollectionResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete when a task is deleted, all of the files in its directory on the compute node where it ran are also deleted +// (regardless of the retention time). For multi-instance tasks, the delete task operation applies synchronously to the +// primary task; subtasks and their files are then deleted asynchronously in the background. +// Parameters: +// jobID - the ID of the job from which to delete the task. +// taskID - the ID of the task to delete. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. 
Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. +func (client TaskClient) Delete(ctx context.Context, jobID string, taskID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TaskClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, jobID, taskID, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.TaskClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.TaskClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.TaskClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client TaskClient) DeletePreparer(ctx context.Context, jobID string, taskID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobId": autorest.Encode("path", jobID), + "taskId": autorest.Encode("path", taskID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobs/{jobId}/tasks/{taskId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client TaskClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client TaskClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get for multi-instance tasks, information such as affinityId, executionInfo and nodeInfo refer to the primary task. +// Use the list subtasks API to retrieve information about subtasks. +// Parameters: +// jobID - the ID of the job that contains the task. +// taskID - the ID of the task to get information about. +// selectParameter - an OData $select clause. +// expand - an OData $expand clause. 
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. +func (client TaskClient) Get(ctx context.Context, jobID string, taskID string, selectParameter string, expand string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result CloudTask, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TaskClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, jobID, taskID, selectParameter, expand, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.TaskClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.TaskClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.TaskClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client TaskClient) GetPreparer(ctx context.Context, jobID string, taskID string, selectParameter string, expand string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobId": autorest.Encode("path", jobID), + "taskId": autorest.Encode("path", taskID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobs/{jobId}/tasks/{taskId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client TaskClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client TaskClient) GetResponder(resp *http.Response) (result CloudTask, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List for multi-instance tasks, information such as affinityId, executionInfo and nodeInfo refer to the primary task. 
+// Use the list subtasks API to retrieve information about subtasks. +// Parameters: +// jobID - the ID of the job. +// filter - an OData $filter clause. For more information on constructing this filter, see +// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. +// selectParameter - an OData $select clause. +// expand - an OData $expand clause. +// maxResults - the maximum number of items to return in the response. A maximum of 1000 tasks can be returned. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +func (client TaskClient) List(ctx context.Context, jobID string, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CloudTaskListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TaskClient.List") + defer func() { + sc := -1 + if result.ctlr.Response.Response != nil { + sc = result.ctlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxResults, + Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil}, + {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("batch.TaskClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, jobID, filter, selectParameter, expand, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.TaskClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.ctlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.TaskClient", "List", resp, "Failure sending request") + return + } + + result.ctlr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.TaskClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client TaskClient) ListPreparer(ctx context.Context, jobID string, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobId": autorest.Encode("path", jobID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + if maxResults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxResults) + } else { + queryParameters["maxresults"] = autorest.Encode("query", 1000) + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobs/{jobId}/tasks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client TaskClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client TaskClient) ListResponder(resp *http.Response) (result CloudTaskListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client TaskClient) listNextResults(ctx context.Context, lastResults CloudTaskListResult) (result CloudTaskListResult, err error) { + req, err := lastResults.cloudTaskListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.TaskClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "batch.TaskClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.TaskClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client TaskClient) ListComplete(ctx context.Context, jobID string, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CloudTaskListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TaskClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, jobID, filter, selectParameter, expand, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate) + return +} + +// ListSubtasks if the task is not a multi-instance task then this returns an empty collection. +// Parameters: +// jobID - the ID of the job. +// taskID - the ID of the task. +// selectParameter - an OData $select clause. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. 
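+//
+// A minimal calling sketch (not generated code): the account URL, job ID and task ID below are
+// placeholders, and the client is assumed to already be configured with an Authorizer. Passing
+// empty or nil values for the remaining parameters leaves the optional settings at their defaults:
+//
+//	ctx := context.Background()
+//	client := NewTaskClient("https://myaccount.myregion.batch.azure.com")
+//	result, err := client.ListSubtasks(ctx, "myjob", "mytask", "", nil, nil, nil, nil)
+//	_ = result // inspect the returned CloudTaskListSubtasksResult here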
+func (client TaskClient) ListSubtasks(ctx context.Context, jobID string, taskID string, selectParameter string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CloudTaskListSubtasksResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TaskClient.ListSubtasks") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListSubtasksPreparer(ctx, jobID, taskID, selectParameter, timeout, clientRequestID, returnClientRequestID, ocpDate) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.TaskClient", "ListSubtasks", nil, "Failure preparing request") + return + } + + resp, err := client.ListSubtasksSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "batch.TaskClient", "ListSubtasks", resp, "Failure sending request") + return + } + + result, err = client.ListSubtasksResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.TaskClient", "ListSubtasks", resp, "Failure responding to request") + } + + return +} + +// ListSubtasksPreparer prepares the ListSubtasks request. +func (client TaskClient) ListSubtasksPreparer(ctx context.Context, jobID string, taskID string, selectParameter string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobId": autorest.Encode("path", jobID), + "taskId": autorest.Encode("path", taskID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobs/{jobId}/tasks/{taskId}/subtasksinfo", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSubtasksSender sends the ListSubtasks request. The method will close the +// http.Response Body if it receives an error. +func (client TaskClient) ListSubtasksSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ListSubtasksResponder handles the response to the ListSubtasks request. 
The method always +// closes the http.Response Body. +func (client TaskClient) ListSubtasksResponder(resp *http.Response) (result CloudTaskListSubtasksResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Reactivate reactivation makes a task eligible to be retried again up to its maximum retry count. The task's state is +// changed to active. As the task is no longer in the completed state, any previous exit code or failure information is +// no longer available after reactivation. Each time a task is reactivated, its retry count is reset to 0. Reactivation +// will fail for tasks that are not completed or that previously completed successfully (with an exit code of 0). +// Additionally, it will fail if the job has completed (or is terminating or deleting). +// Parameters: +// jobID - the ID of the job containing the task. +// taskID - the ID of the task to reactivate. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. 
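+//
+// A minimal calling sketch (not generated code; the account URL and IDs are placeholders and the
+// client is assumed to be authenticated). Nil and empty arguments leave the optional timeout,
+// request-ID and conditional (If-*) headers unset:
+//
+//	ctx := context.Background()
+//	client := NewTaskClient("https://myaccount.myregion.batch.azure.com")
+//	_, err := client.Reactivate(ctx, "myjob", "mytask", nil, nil, nil, nil, "", "", nil, nil)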
+func (client TaskClient) Reactivate(ctx context.Context, jobID string, taskID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TaskClient.Reactivate") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ReactivatePreparer(ctx, jobID, taskID, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.TaskClient", "Reactivate", nil, "Failure preparing request") + return + } + + resp, err := client.ReactivateSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.TaskClient", "Reactivate", resp, "Failure sending request") + return + } + + result, err = client.ReactivateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.TaskClient", "Reactivate", resp, "Failure responding to request") + } + + return +} + +// ReactivatePreparer prepares the Reactivate request. +func (client TaskClient) ReactivatePreparer(ctx context.Context, jobID string, taskID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobId": autorest.Encode("path", jobID), + "taskId": autorest.Encode("path", taskID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobs/{jobId}/tasks/{taskId}/reactivate", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = 
autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ReactivateSender sends the Reactivate request. The method will close the +// http.Response Body if it receives an error. +func (client TaskClient) ReactivateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ReactivateResponder handles the response to the Reactivate request. The method always +// closes the http.Response Body. +func (client TaskClient) ReactivateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Terminate when the task has been terminated, it moves to the completed state. For multi-instance tasks, the +// terminate task operation applies synchronously to the primary task; subtasks are then terminated asynchronously in +// the background. +// Parameters: +// jobID - the ID of the job containing the task. +// taskID - the ID of the task to terminate. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. 
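+//
+// A minimal calling sketch (not generated code; the account URL and IDs are placeholders and the
+// client is assumed to be authenticated). An ETag obtained from a prior Get could be supplied as
+// ifMatch to make the terminate conditional; here the conditional headers are simply left unset:
+//
+//	ctx := context.Background()
+//	client := NewTaskClient("https://myaccount.myregion.batch.azure.com")
+//	_, err := client.Terminate(ctx, "myjob", "mytask", nil, nil, nil, nil, "", "", nil, nil)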
+func (client TaskClient) Terminate(ctx context.Context, jobID string, taskID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TaskClient.Terminate") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.TerminatePreparer(ctx, jobID, taskID, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.TaskClient", "Terminate", nil, "Failure preparing request") + return + } + + resp, err := client.TerminateSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.TaskClient", "Terminate", resp, "Failure sending request") + return + } + + result, err = client.TerminateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.TaskClient", "Terminate", resp, "Failure responding to request") + } + + return +} + +// TerminatePreparer prepares the Terminate request. +func (client TaskClient) TerminatePreparer(ctx context.Context, jobID string, taskID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobId": autorest.Encode("path", jobID), + "taskId": autorest.Encode("path", taskID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobs/{jobId}/tasks/{taskId}/terminate", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + if ifModifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = 
autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// TerminateSender sends the Terminate request. The method will close the +// http.Response Body if it receives an error. +func (client TaskClient) TerminateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// TerminateResponder handles the response to the Terminate request. The method always +// closes the http.Response Body. +func (client TaskClient) TerminateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Update updates the properties of the specified task. +// Parameters: +// jobID - the ID of the job containing the task. +// taskID - the ID of the task to update. +// taskUpdateParameter - the parameters for the request. +// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30 +// seconds. +// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as +// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. +// returnClientRequestID - whether the server should return the client-request-id in the response. +// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock +// time; set it explicitly if you are calling the REST API directly. +// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will +// be performed only if the resource's current ETag on the service exactly matches the value specified by the +// client. +// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation +// will be performed only if the resource's current ETag on the service does not match the value specified by +// the client. +// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has been modified since the specified time. +// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The +// operation will be performed only if the resource on the service has not been modified since the specified +// time. 
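+//
+// A minimal calling sketch (not generated code; the account URL and IDs are placeholders and the
+// client is assumed to be authenticated). The fields of TaskUpdateParameter are defined in
+// models.go for this API version; an empty value is shown here purely for illustration:
+//
+//	ctx := context.Background()
+//	client := NewTaskClient("https://myaccount.myregion.batch.azure.com")
+//	_, err := client.Update(ctx, "myjob", "mytask", TaskUpdateParameter{}, nil, nil, nil, nil, "", "", nil, nil)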
+func (client TaskClient) Update(ctx context.Context, jobID string, taskID string, taskUpdateParameter TaskUpdateParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TaskClient.Update") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, jobID, taskID, taskUpdateParameter, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.TaskClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "batch.TaskClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.TaskClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client TaskClient) UpdatePreparer(ctx context.Context, jobID string, taskID string, taskUpdateParameter TaskUpdateParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "batchUrl": client.BatchURL, + } + + pathParameters := map[string]interface{}{ + "jobId": autorest.Encode("path", jobID), + "taskId": autorest.Encode("path", taskID), + } + + const APIVersion = "2019-06-01.9.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if timeout != nil { + queryParameters["timeout"] = autorest.Encode("query", *timeout) + } else { + queryParameters["timeout"] = autorest.Encode("query", 30) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"), + autorest.AsPut(), + autorest.WithCustomBaseURL("{batchUrl}", urlParameters), + autorest.WithPathParameters("/jobs/{jobId}/tasks/{taskId}", pathParameters), + autorest.WithJSON(taskUpdateParameter), + autorest.WithQueryParameters(queryParameters)) + if clientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("client-request-id", autorest.String(clientRequestID))) + } + if returnClientRequestID != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID))) + } else { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("return-client-request-id", autorest.String(false))) + } + if ocpDate != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("ocp-date", autorest.String(ocpDate))) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + if ifModifiedSince != nil { + preparer = 
autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince))) + } + if ifUnmodifiedSince != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client TaskClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client TaskClient) UpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/services/batch/2019-06-01.9.0/batch/version.go b/services/batch/2019-06-01.9.0/batch/version.go new file mode 100644 index 000000000000..cd7e1a7e2887 --- /dev/null +++ b/services/batch/2019-06-01.9.0/batch/version.go @@ -0,0 +1,30 @@ +package batch + +import "github.com/Azure/azure-sdk-for-go/version" + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return "Azure-SDK-For-Go/" + version.Number + " batch/2019-06-01.9.0" +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return version.Number +}